Merge branch 'master' into feature/query-refactoring

Authored by javanna on 2015-06-25 16:57:42 +02:00; committed by Luca Cavanna.
commit 556e43aa84
252 changed files with 4417 additions and 3720 deletions

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.repositories.put;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.bytes.BytesReference;
@@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
@@ -231,8 +231,8 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(String repositoryDefinition) {
try {
return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
}
@@ -255,8 +255,8 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(byte[] repositoryDefinition, int offset, int length) {
try {
return source(XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source", e);
}
@@ -269,8 +269,8 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(BytesReference repositoryDefinition) {
try {
return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse template source", e);
}
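The source(...) overloads above all trade the removed mapOrderedAndClose() chain for an explicit try-with-resources block, so the parser is closed even when parsing throws mid-stream. A minimal sketch of the recurring pattern, using only calls visible in these hunks; the class and method names are illustrative, not part of the commit:

import java.io.IOException;
import java.util.Map;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

// Illustrative helper only, not part of this commit.
final class ParseSketch {
    static Map<String, Object> parseOrdered(String source) {
        // try-with-resources guarantees parser.close() runs on success and on failure
        try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
            return parser.mapOrdered();
        } catch (IOException e) {
            throw new IllegalArgumentException("failed to parse source [" + source + "]", e);
        }
    }
}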

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.shards;
import org.elasticsearch.cluster.routing.ImmutableShardRouting;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -71,7 +70,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
shardId = in.readVInt();
shards = new ShardRouting[in.readVInt()];
for (int i = 0; i < shards.length; i++) {
shards[i] = ImmutableShardRouting.readShardRoutingEntry(in, index, shardId);
shards[i] = ShardRouting.readShardRoutingEntry(in, index, shardId);
}
}
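The readFrom body above decodes a length-prefixed array: read a count, allocate, then decode each element in place. The same shape in plain Java, with DataInput standing in for StreamInput and the variable-length readVInt simplified to a fixed-width readInt:

import java.io.DataInput;
import java.io.IOException;

// Sketch of length-prefixed array decoding; names are illustrative.
final class ArrayDecodeSketch {
    static int[] readAll(DataInput in) throws IOException {
        int[] values = new int[in.readInt()]; // count comes first on the wire
        for (int i = 0; i < values.length; i++) {
            values[i] = in.readInt();         // then each element, in order
        }
        return values;
    }
}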

View File

@@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
@@ -402,8 +403,8 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
*/
public CreateSnapshotRequest source(String source) {
if (hasLength(source)) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (Exception e) {
throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
@@ -431,8 +432,8 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
*/
public CreateSnapshotRequest source(byte[] source, int offset, int length) {
if (length > 0) {
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source", e);
}
@@ -447,8 +448,8 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
* @return this request
*/
public CreateSnapshotRequest source(BytesReference source) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse snapshot source", e);
}

View File

@@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
@@ -553,8 +554,8 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
*/
public RestoreSnapshotRequest source(String source) {
if (hasLength(source)) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (Exception e) {
throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
@@ -586,8 +587,8 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
*/
public RestoreSnapshotRequest source(byte[] source, int offset, int length) {
if (length > 0) {
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source", e);
}
@@ -604,8 +605,8 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
* @return this request
*/
public RestoreSnapshotRequest source(BytesReference source) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse template source", e);
}

View File

@@ -41,6 +41,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final TransportMessage originalMessage;
private final String cause;
private final String index;
private final boolean updateAllTypes;
private IndexMetaData.State state = IndexMetaData.State.OPEN;
@@ -55,10 +56,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final Set<ClusterBlock> blocks = Sets.newHashSet();
CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index) {
CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
this.originalMessage = originalMessage;
this.cause = cause;
this.index = index;
this.updateAllTypes = updateAllTypes;
}
public CreateIndexClusterStateUpdateRequest settings(Settings settings) {
@@ -126,4 +128,9 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
public Set<ClusterBlock> blocks() {
return blocks;
}
/** True if all fields that span multiple types should be updated, false otherwise */
public boolean updateAllTypes() {
return updateAllTypes;
}
}

View File

@@ -72,6 +72,8 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
private final Map<String, IndexMetaData.Custom> customs = newHashMap();
private boolean updateAllTypes = false;
CreateIndexRequest() {
}
@@ -362,8 +364,8 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
public CreateIndexRequest source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
if (xContentType != null) {
try {
source(XContentFactory.xContent(xContentType).createParser(source).mapAndClose());
try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) {
source(parser.map());
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse source for create index", e);
}
@@ -433,6 +435,17 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this.customs;
}
/** True if all fields that span multiple types should be updated, false otherwise */
public boolean updateAllTypes() {
return updateAllTypes;
}
/** See {@link #updateAllTypes()} */
public CreateIndexRequest updateAllTypes(boolean updateAllTypes) {
this.updateAllTypes = updateAllTypes;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -454,6 +467,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
for (int i = 0; i < aliasesSize; i++) {
aliases.add(Alias.read(in));
}
updateAllTypes = in.readBoolean();
}
@Override
@@ -477,5 +491,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
for (Alias alias : aliases) {
alias.writeTo(out);
}
out.writeBoolean(updateAllTypes);
}
}
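The new updateAllTypes flag is appended at the very end of both readFrom and writeTo. StreamInput and StreamOutput carry no field names, so field order is the wire contract; appending keeps every pre-existing field at its old position. A hedged sketch of that contract in plain Java, with DataInput/DataOutput standing in for the Elasticsearch stream types:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Sketch of an order-dependent wire format: fields are read back in exactly
// the order they were written, and new fields are appended last.
class WireFlagSketch {
    private boolean updateAllTypes;

    void writeTo(DataOutput out) throws IOException {
        // ... all pre-existing fields are written first, unchanged ...
        out.writeBoolean(updateAllTypes); // appended last, mirroring the hunk above
    }

    void readFrom(DataInput in) throws IOException {
        // ... all pre-existing fields are read first, unchanged ...
        updateAllTypes = in.readBoolean(); // read from the same appended position
    }
}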

View File

@@ -243,4 +243,10 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
request.source(source);
return this;
}
/** True if all fields that span multiple types should be updated, false otherwise */
public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) {
request.updateAllTypes(updateAllTypes);
return this;
}
}

View File

@@ -71,7 +71,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
cause = "api";
}
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, request.index())
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, request.index(), request.updateAllTypes())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs());

View File

@@ -30,6 +30,8 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
private String source;
private boolean updateAllTypes = false;
PutMappingClusterStateUpdateRequest() {
}
@@ -51,4 +53,13 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
this.source = source;
return this;
}
public boolean updateAllTypes() {
return updateAllTypes;
}
public PutMappingClusterStateUpdateRequest updateAllTypes(boolean updateAllTypes) {
this.updateAllTypes = updateAllTypes;
return this;
}
}

View File

@@ -63,6 +63,8 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
private String source;
private boolean updateAllTypes = false;
PutMappingRequest() {
}
@@ -236,6 +238,17 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
return this;
}
/** True if all fields that span multiple types should be updated, false otherwise */
public boolean updateAllTypes() {
return updateAllTypes;
}
/** See {@link #updateAllTypes()} */
public PutMappingRequest updateAllTypes(boolean updateAllTypes) {
this.updateAllTypes = updateAllTypes;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -243,6 +256,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
indicesOptions = IndicesOptions.readIndicesOptions(in);
type = in.readOptionalString();
source = in.readString();
updateAllTypes = in.readBoolean();
readTimeout(in);
}
@@ -253,6 +267,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
indicesOptions.writeIndicesOptions(out);
out.writeOptionalString(type);
out.writeString(source);
out.writeBoolean(updateAllTypes);
writeTimeout(out);
}
}

View File

@@ -91,4 +91,10 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
return this;
}
/** True if all fields that span multiple types should be updated, false otherwise */
public PutMappingRequestBuilder setUpdateAllTypes(boolean updateAllTypes) {
request.updateAllTypes(updateAllTypes);
return this;
}
}
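setUpdateAllTypes here and on CreateIndexRequestBuilder earlier follow the same fluent delegate-to-request shape. A hypothetical usage sketch; the Client instance, the index and type names, the mapping JSON, and the string setSource overload are all assumptions, none of them shown in this commit:

import org.elasticsearch.client.Client;

// Hypothetical usage only; not part of this commit.
final class UpdateAllTypesUsageSketch {
    void putMappingForAllTypes(Client client, String mappingSource) {
        client.admin().indices().preparePutMapping("my_index")
                .setType("my_type")
                .setSource(mappingSource)
                .setUpdateAllTypes(true) // new in this commit
                .get();
    }
}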

View File

@@ -69,6 +69,7 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices).type(request.type())
.updateAllTypes(request.updateAllTypes())
.source(request.source());
metaDataMappingService.putMapping(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

View File

@@ -31,7 +31,7 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
public class ShardSegments extends BroadcastShardResponse implements Iterable<Segment> {

View File

@@ -32,7 +32,7 @@ import org.elasticsearch.index.shard.IndexShard;
import java.io.IOException;
import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
/**
*/

View File

@@ -308,8 +308,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
* The template source definition.
*/
public PutIndexTemplateRequest source(String templateSource) {
try {
return source(XContentFactory.xContent(templateSource).createParser(templateSource).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(templateSource).createParser(templateSource)) {
return source(parser.mapOrdered());
} catch (Exception e) {
throw new IllegalArgumentException("failed to parse template source [" + templateSource + "]", e);
}
@@ -326,8 +326,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
* The template source definition.
*/
public PutIndexTemplateRequest source(byte[] source, int offset, int length) {
try {
return source(XContentFactory.xContent(source, offset, length).createParser(source, offset, length).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse template source", e);
}
@@ -337,8 +337,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
* The template source definition.
*/
public PutIndexTemplateRequest source(BytesReference source) {
try {
return source(XContentFactory.xContent(source).createParser(source).mapOrderedAndClose());
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse template source", e);
}

View File

@@ -26,7 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
public class ShardUpgradeStatus extends BroadcastShardResponse {

View File

@@ -42,7 +42,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

View File

@@ -38,7 +38,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.IndexShard;

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.action.search;
import com.google.common.collect.Iterators;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -31,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
/**
@@ -43,22 +45,22 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
*/
public static class Item implements Streamable {
private SearchResponse response;
private String failureMessage;
private Throwable throwable;
Item() {
}
public Item(SearchResponse response, String failureMessage) {
public Item(SearchResponse response, Throwable throwable) {
this.response = response;
this.failureMessage = failureMessage;
this.throwable = throwable;
}
/**
* Is it a failed search?
*/
public boolean isFailure() {
return failureMessage != null;
return throwable != null;
}
/**
@@ -66,7 +68,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
*/
@Nullable
public String getFailureMessage() {
return failureMessage;
return throwable == null ? null : throwable.getMessage();
}
/**
@@ -89,7 +91,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
this.response = new SearchResponse();
response.readFrom(in);
} else {
failureMessage = in.readString();
throwable = in.readThrowable();
}
}
@@ -100,9 +102,13 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
response.writeTo(out);
} else {
out.writeBoolean(false);
out.writeString(failureMessage);
out.writeThrowable(throwable);
}
}
public Throwable getFailure() {
return throwable;
}
}
private Item[] items;
@@ -150,7 +156,19 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
for (Item item : items) {
if (item.isFailure()) {
builder.startObject();
builder.field(Fields.ERROR, item.getFailureMessage());
builder.startObject(Fields.ERROR);
final Throwable t = item.getFailure();
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
builder.field(Fields.ROOT_CAUSE);
builder.startArray();
for (ElasticsearchException rootCause : rootCauses){
builder.startObject();
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
builder.endObject();
}
builder.endArray();
ElasticsearchException.toXContent(builder, params, t);
builder.endObject();
builder.endObject();
} else {
builder.startObject();
@@ -165,6 +183,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
static final class Fields {
static final XContentBuilderString RESPONSES = new XContentBuilderString("responses");
static final XContentBuilderString ERROR = new XContentBuilderString("error");
static final XContentBuilderString ROOT_CAUSE = new XContentBuilderString("root_cause");
}
@Override

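Item now carries the original Throwable instead of a pre-rendered message string; getFailureMessage() derives the old string on demand, and keeping the full exception is what enables the structured root_cause rendering in toXContent above. On the wire this is a tagged union: a boolean discriminator, then either the response or the throwable. A plain-Java sketch of that shape, with ObjectOutputStream standing in for StreamOutput and all names illustrative:

import java.io.IOException;
import java.io.ObjectOutputStream;

// Sketch of a tagged success-or-failure union on the wire.
final class ItemSketch {
    String response;    // non-null on success (stands in for SearchResponse)
    Throwable failure;  // non-null on failure

    boolean isFailure() {
        return failure != null;
    }

    void writeTo(ObjectOutputStream out) throws IOException {
        out.writeBoolean(response != null); // discriminator first
        if (response != null) {
            out.writeUTF(response);
        } else {
            out.writeObject(failure); // analogous to out.writeThrowable(throwable)
        }
    }
}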
View File

@@ -69,7 +69,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
@Override
public void onFailure(Throwable e) {
responses.set(index, new MultiSearchResponse.Item(null, ExceptionsHelper.detailedMessage(e)));
responses.set(index, new MultiSearchResponse.Item(null, e));
if (counter.decrementAndGet() == 0) {
finishHim();
}

View File

@@ -43,7 +43,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;

View File

@@ -122,13 +122,24 @@
}
}
} else if ("IBM Corporation".equals(Constants.JVM_VENDOR)) {
// currently any JVM from IBM will easily result in index corruption.
StringBuilder sb = new StringBuilder();
sb.append("IBM runtimes suffer from several bugs which can cause data corruption.");
sb.append(System.lineSeparator());
sb.append("Please upgrade the JVM, see ").append(JVM_RECOMMENDATIONS);
sb.append(" for current recommendations.");
throw new RuntimeException(sb.toString());
// currently some old JVM versions from IBM will easily result in index corruption.
// 2.8+ seems ok for ES from testing.
float version = Float.POSITIVE_INFINITY;
try {
version = Float.parseFloat(Constants.JVM_VERSION);
} catch (NumberFormatException ignored) {
// this is just a simple best-effort to detect old runtimes,
// if we cannot parse it, we don't fail.
}
if (version < 2.8f) {
StringBuilder sb = new StringBuilder();
sb.append("IBM J9 runtimes < 2.8 suffer from several bugs which can cause data corruption.");
sb.append(System.lineSeparator());
sb.append("Your version: " + Constants.JVM_VERSION);
sb.append(System.lineSeparator());
sb.append("Please upgrade the JVM to a recent IBM JDK");
throw new RuntimeException(sb.toString());
}
}
}
}
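The replacement check only rejects IBM J9 runtimes older than 2.8 and deliberately fails open: an unparseable version string is treated as recent rather than aborting startup. A self-contained sketch of that best-effort gate; the class and method names are illustrative:

// Illustrative distillation of the parse-or-assume-recent logic above.
final class J9GateSketch {
    static boolean isKnownBadJ9(String jvmVersion) {
        float version = Float.POSITIVE_INFINITY; // unparseable => assume recent
        try {
            version = Float.parseFloat(jvmVersion);
        } catch (NumberFormatException ignored) {
            // best-effort detection only; never fail startup on a parse error
        }
        return version < 2.8f;
    }
}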

View File

@@ -45,7 +45,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
/**
*

View File

@@ -81,9 +81,8 @@ public class AliasValidator extends AbstractComponent {
public void validateAliasStandalone(Alias alias) {
validateAliasStandalone(alias.name(), alias.indexRouting());
if (Strings.hasLength(alias.filter())) {
try {
XContentParser parser = XContentFactory.xContent(alias.filter()).createParser(alias.filter());
parser.mapAndClose();
try (XContentParser parser = XContentFactory.xContent(alias.filter()).createParser(alias.filter())) {
parser.map();
} catch (Throwable e) {
throw new IllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e);
}

View File

@@ -343,9 +343,10 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
builder.startArray("mappings");
for (ObjectObjectCursor<String, CompressedXContent> cursor : indexTemplateMetaData.mappings()) {
byte[] data = cursor.value.uncompressed();
XContentParser parser = XContentFactory.xContent(data).createParser(data);
Map<String, Object> mapping = parser.mapOrderedAndClose();
builder.map(mapping);
try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
Map<String, Object> mapping = parser.mapOrdered();
builder.map(mapping);
}
}
builder.endArray();
}

View File

@@ -288,7 +288,10 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
public MappingMetaData(CompressedXContent mapping) throws IOException {
this.source = mapping;
Map<String, Object> mappingMap = XContentHelper.createParser(mapping.compressedReference()).mapOrderedAndClose();
Map<String, Object> mappingMap;
try (XContentParser parser = XContentHelper.createParser(mapping.compressedReference())) {
mappingMap = parser.mapOrdered();
}
if (mappingMap.size() != 1) {
throw new IllegalStateException("Can't derive type from mapping, no root type: " + mapping.string());
}

View File

@@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -53,6 +54,7 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.DocumentMapper;
@@ -345,7 +347,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// first, add the default mapping
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
try {
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false);
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing default mapping on index creation";
throw new MapperParsingException("mapping [" + MapperService.DEFAULT_MAPPING + "]", e);
@@ -357,7 +359,7 @@
}
try {
// apply the default here, its the first time we parse it
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true);
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing mappings on index creation";
throw new MapperParsingException("mapping [" + entry.getKey() + "]", e);
@@ -451,7 +453,9 @@
}
private Map<String, Object> parseMapping(String mappingSource) throws Exception {
return XContentFactory.xContent(mappingSource).createParser(mappingSource).mapAndClose();
try (XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource)) {
return parser.map();
}
}
private void addMappings(Map<String, Map<String, Object>> mappings, Path mappingsDir) throws IOException {

View File

@@ -101,11 +101,11 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
try {
indexService = indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), clusterService.localNode().id());
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false);
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
}
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false);
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
}
} catch (Exception e) {
logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.index());

View File

@@ -161,6 +161,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
/** All known time settings for an index. */
public static final Set<String> INDEX_TIME_SETTINGS = ImmutableSet.of(
"index.gateway.wait_for_mapping_update_post_recovery",
"index.shard.wait_for_mapping_update_post_recovery",
"index.gc_deletes",
"index.indexing.slowlog.threshold.index.debug",
"index.indexing.slowlog.threshold.index.info",

View File

@@ -193,7 +193,7 @@ public class MetaDataMappingService extends AbstractComponent {
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(type)) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source(), false);
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source(), false, true);
}
}
}
@@ -264,7 +264,7 @@
continue;
}
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false);
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true);
processedRefreshes.add(type);
// if we end up with the same mapping as the original once, ignore
@@ -361,11 +361,11 @@
indicesToClose.add(indexMetaData.index());
// make sure to add custom default mapping if exists
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false);
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
}
// only add the current relevant mapping (if exists)
if (indexMetaData.mappings().containsKey(request.type())) {
indexService.mapperService().merge(request.type(), indexMetaData.mappings().get(request.type()).source(), false);
indexService.mapperService().merge(request.type(), indexMetaData.mappings().get(request.type()).source(), false, request.updateAllTypes());
}
}
@@ -383,7 +383,7 @@
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
if (existingMapper != null) {
// first, simulate
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true);
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
// if we have conflicts, throw an exception
if (mergeResult.hasConflicts()) {
throw new MergeMappingException(mergeResult.buildConflicts());
@@ -438,7 +438,7 @@
if (existingMappers.containsKey(entry.getKey())) {
existingSource = existingMappers.get(entry.getKey()).mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false);
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {

View File

@@ -1,396 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.io.Serializable;
/**
* {@link ImmutableShardRouting} immutably encapsulates information about shard
* routings like id, state, version, etc.
*/
public class ImmutableShardRouting implements Streamable, Serializable, ShardRouting {
protected String index;
protected int shardId;
protected String currentNodeId;
protected String relocatingNodeId;
protected boolean primary;
protected ShardRoutingState state;
protected long version;
private transient ShardId shardIdentifier;
protected RestoreSource restoreSource;
protected UnassignedInfo unassignedInfo;
private final transient ImmutableList<ShardRouting> asList;
ImmutableShardRouting() {
this.asList = ImmutableList.of((ShardRouting) this);
}
public ImmutableShardRouting(ShardRouting copy) {
this(copy, copy.version());
}
public ImmutableShardRouting(ShardRouting copy, long version) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo());
}
public ImmutableShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state, long version) {
this(index, shardId, currentNodeId, null, primary, state, version);
}
public ImmutableShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state, long version) {
this(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version);
}
public ImmutableShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) {
this(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, null);
}
public ImmutableShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
UnassignedInfo unassignedInfo) {
this.index = index;
this.shardId = shardId;
this.currentNodeId = currentNodeId;
this.relocatingNodeId = relocatingNodeId;
this.primary = primary;
this.state = state;
this.asList = ImmutableList.of((ShardRouting) this);
this.version = version;
this.restoreSource = restoreSource;
this.unassignedInfo = unassignedInfo;
assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
}
@Override
public String index() {
return this.index;
}
@Override
public String getIndex() {
return index();
}
@Override
public int id() {
return this.shardId;
}
@Override
public int getId() {
return id();
}
@Override
public long version() {
return this.version;
}
@Override
public boolean unassigned() {
return state == ShardRoutingState.UNASSIGNED;
}
@Override
public boolean initializing() {
return state == ShardRoutingState.INITIALIZING;
}
@Override
public boolean active() {
return started() || relocating();
}
@Override
public boolean started() {
return state == ShardRoutingState.STARTED;
}
@Override
public boolean relocating() {
return state == ShardRoutingState.RELOCATING;
}
@Override
public boolean assignedToNode() {
return currentNodeId != null;
}
@Override
public String currentNodeId() {
return this.currentNodeId;
}
@Override
public String relocatingNodeId() {
return this.relocatingNodeId;
}
@Override
public ShardRouting targetRoutingIfRelocating() {
if (!relocating()) {
return null;
}
return new ImmutableShardRouting(index, shardId, relocatingNodeId, currentNodeId, primary, ShardRoutingState.INITIALIZING, version);
}
@Override
public RestoreSource restoreSource() {
return restoreSource;
}
@Override
@Nullable
public UnassignedInfo unassignedInfo() {
return unassignedInfo;
}
@Override
public boolean primary() {
return this.primary;
}
@Override
public ShardRoutingState state() {
return this.state;
}
@Override
public ShardId shardId() {
if (shardIdentifier != null) {
return shardIdentifier;
}
shardIdentifier = new ShardId(index, shardId);
return shardIdentifier;
}
@Override
public ShardIterator shardsIt() {
return new PlainShardIterator(shardId(), asList);
}
public static ImmutableShardRouting readShardRoutingEntry(StreamInput in) throws IOException {
ImmutableShardRouting entry = new ImmutableShardRouting();
entry.readFrom(in);
return entry;
}
public static ImmutableShardRouting readShardRoutingEntry(StreamInput in, String index, int shardId) throws IOException {
ImmutableShardRouting entry = new ImmutableShardRouting();
entry.readFrom(in, index, shardId);
return entry;
}
public void readFrom(StreamInput in, String index, int shardId) throws IOException {
this.index = index;
this.shardId = shardId;
readFromThin(in);
}
@Override
public void readFromThin(StreamInput in) throws IOException {
version = in.readLong();
if (in.readBoolean()) {
currentNodeId = in.readString();
}
if (in.readBoolean()) {
relocatingNodeId = in.readString();
}
primary = in.readBoolean();
state = ShardRoutingState.fromValue(in.readByte());
restoreSource = RestoreSource.readOptionalRestoreSource(in);
if (in.readBoolean()) {
unassignedInfo = new UnassignedInfo(in);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
readFrom(in, in.readString(), in.readVInt());
}
/**
* Writes shard information to {@link StreamOutput} without writing index name and shard id
*
* @param out {@link StreamOutput} to write shard information to
* @throws IOException if something happens during write
*/
@Override
public void writeToThin(StreamOutput out) throws IOException {
out.writeLong(version);
if (currentNodeId != null) {
out.writeBoolean(true);
out.writeString(currentNodeId);
} else {
out.writeBoolean(false);
}
if (relocatingNodeId != null) {
out.writeBoolean(true);
out.writeString(relocatingNodeId);
} else {
out.writeBoolean(false);
}
out.writeBoolean(primary);
out.writeByte(state.value());
if (restoreSource != null) {
out.writeBoolean(true);
restoreSource.writeTo(out);
} else {
out.writeBoolean(false);
}
if (unassignedInfo != null) {
out.writeBoolean(true);
unassignedInfo.writeTo(out);
} else {
out.writeBoolean(false);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
out.writeVInt(shardId);
writeToThin(out);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
// we check on instanceof so we also handle the MutableShardRouting case as well
if (o == null || !(o instanceof ImmutableShardRouting)) {
return false;
}
ImmutableShardRouting that = (ImmutableShardRouting) o;
if (primary != that.primary) {
return false;
}
if (shardId != that.shardId) {
return false;
}
if (currentNodeId != null ? !currentNodeId.equals(that.currentNodeId) : that.currentNodeId != null) {
return false;
}
if (index != null ? !index.equals(that.index) : that.index != null) {
return false;
}
if (relocatingNodeId != null ? !relocatingNodeId.equals(that.relocatingNodeId) : that.relocatingNodeId != null) {
return false;
}
if (state != that.state) {
return false;
}
if (restoreSource != null ? !restoreSource.equals(that.restoreSource) : that.restoreSource != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = index != null ? index.hashCode() : 0;
result = 31 * result + shardId;
result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0);
result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
result = 31 * result + (state != null ? state.hashCode() : 0);
result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
return result;
}
@Override
public String toString() {
return shortSummary();
}
@Override
public String shortSummary() {
StringBuilder sb = new StringBuilder();
sb.append('[').append(index).append(']').append('[').append(shardId).append(']');
sb.append(", node[").append(currentNodeId).append("], ");
if (relocatingNodeId != null) {
sb.append("relocating [").append(relocatingNodeId).append("], ");
}
if (primary) {
sb.append("[P]");
} else {
sb.append("[R]");
}
if (this.restoreSource != null) {
sb.append(", restoring[" + restoreSource + "]");
}
sb.append(", s[").append(state).append("]");
if (this.unassignedInfo != null) {
sb.append(", ").append(unassignedInfo.toString());
}
return sb.toString();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject()
.field("state", state())
.field("primary", primary())
.field("node", currentNodeId())
.field("relocating_node", relocatingNodeId())
.field("shard", shardId().id())
.field("index", shardId().index().name());
if (restoreSource() != null) {
builder.field("restore_source");
restoreSource().toXContent(builder, params);
}
if (unassignedInfo != null) {
unassignedInfo.toXContent(builder, params);
}
return builder.endObject();
}
}

View File

@@ -70,24 +70,21 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
// shards with state set to UNASSIGNED
private final ImmutableOpenIntMap<IndexShardRoutingTable> shards;
private final ImmutableList<ShardRouting> allShards;
private final ImmutableList<ShardRouting> allActiveShards;
private final List<ShardRouting> allActiveShards;
IndexRoutingTable(String index, ImmutableOpenIntMap<IndexShardRoutingTable> shards) {
this.index = index;
this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt());
this.shards = shards;
ImmutableList.Builder<ShardRouting> allShards = ImmutableList.builder();
ImmutableList.Builder<ShardRouting> allActiveShards = ImmutableList.builder();
for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
for (ShardRouting shardRouting : cursor.value) {
allShards.add(shardRouting);
shardRouting.freeze();
if (shardRouting.active()) {
allActiveShards.add(shardRouting);
}
}
}
this.allShards = allShards.build();
this.allActiveShards = allActiveShards.build();
}
@@ -275,13 +272,6 @@
return shards;
}
/**
* Returns an unordered iterator over all shards (including replicas).
*/
public ShardsIterator randomAllShardsIt() {
return new PlainShardsIterator(shuffler.shuffle(allShards));
}
/**
* Returns an unordered iterator over all active shards (including replicas).
*/
@@ -443,9 +433,9 @@
for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
if (asNew && ignoreShards.contains(shardId)) {
// This shard wasn't completely snapshotted - restore it as a new shard
indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, null, null, i == 0, ShardRoutingState.UNASSIGNED, 0, unassignedInfo));
indexShardRoutingBuilder.addShard(new ShardRouting(index, shardId, null, null, null, i == 0, ShardRoutingState.UNASSIGNED, 0, unassignedInfo));
} else {
indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, null, i == 0 ? restoreSource : null, i == 0, ShardRoutingState.UNASSIGNED, 0, unassignedInfo));
indexShardRoutingBuilder.addShard(new ShardRouting(index, shardId, null, null, i == 0 ? restoreSource : null, i == 0, ShardRoutingState.UNASSIGNED, 0, unassignedInfo));
}
}
shards.put(shardId, indexShardRoutingBuilder.build());
@@ -463,7 +453,7 @@
for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), asNew ? false : true);
for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, shardId, null, null, null, i == 0, ShardRoutingState.UNASSIGNED, 0, unassignedInfo));
indexShardRoutingBuilder.addShard(new ShardRouting(index, shardId, null, null, null, i == 0, ShardRoutingState.UNASSIGNED, 0, unassignedInfo));
}
shards.put(shardId, indexShardRoutingBuilder.build());
}
@@ -474,7 +464,7 @@
for (IntCursor cursor : shards.keys()) {
int shardId = cursor.value;
// version 0, will get updated when reroute will happen
ImmutableShardRouting shard = new ImmutableShardRouting(index, shardId, null, null, null, false, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
ShardRouting shard = new ShardRouting(index, shardId, null, null, null, false, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
shards.put(shardId,
new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
);
@@ -493,7 +483,7 @@
// re-add all the current ones
IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(indexShard.shardId(), indexShard.primaryAllocatedPostApi());
for (ShardRouting shardRouting : indexShard) {
builder.addShard(new ImmutableShardRouting(shardRouting));
builder.addShard(new ShardRouting(shardRouting));
}
// first check if there is one that is not assigned to a node, and remove it
boolean removed = false;
@@ -540,9 +530,9 @@
public Builder addShard(IndexShardRoutingTable refData, ShardRouting shard) {
IndexShardRoutingTable indexShard = shards.get(shard.id());
if (indexShard == null) {
indexShard = new IndexShardRoutingTable.Builder(refData.shardId(), refData.primaryAllocatedPostApi()).addShard(new ImmutableShardRouting(shard)).build();
indexShard = new IndexShardRoutingTable.Builder(refData.shardId(), refData.primaryAllocatedPostApi()).addShard(new ShardRouting(shard)).build();
} else {
indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(new ImmutableShardRouting(shard)).build();
indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(new ShardRouting(shard)).build();
}
shards.put(indexShard.shardId().id(), indexShard);
return this;

View File

@@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.MapBuilder;
@@ -141,7 +140,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
if (shards.get(i).version() == highestVersion) {
shardRoutings.add(shards.get(i));
} else {
shardRoutings.add(new ImmutableShardRouting(shards.get(i), highestVersion));
shardRoutings.add(new ShardRouting(shards.get(i), highestVersion));
}
}
return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shardRoutings), primaryAllocatedPostApi);
@@ -559,7 +558,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
this.primaryAllocatedPostApi = primaryAllocatedPostApi;
}
public Builder addShard(ImmutableShardRouting shardEntry) {
public Builder addShard(ShardRouting shardEntry) {
for (ShardRouting shard : shards) {
// don't add two that map to the same node id
// we rely on the fact that a node does not have primary and backup of the same shard
@@ -601,7 +600,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
int size = in.readVInt();
for (int i = 0; i < size; i++) {
ImmutableShardRouting shard = ImmutableShardRouting.readShardRoutingEntry(in, index, iShardId);
ShardRouting shard = ShardRouting.readShardRoutingEntry(in, index, iShardId);
builder.addShard(shard);
}

View File

@@ -1,160 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing;
/**
* Similar to {@link ImmutableShardRouting} this class keeps metadata of the current shard. But unlike
* {@link ImmutableShardRouting} the information kept in this class can be modified.
* These modifications include changing the primary state, relocating and assigning the shard
* represented by this class
*/
public class MutableShardRouting extends ImmutableShardRouting {
public MutableShardRouting(ShardRouting copy) {
super(copy);
}
public MutableShardRouting(ShardRouting copy, long version) {
super(copy, version);
}
public MutableShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) {
super(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version);
assert state != ShardRoutingState.UNASSIGNED : "new mutable routing should not be created with UNASSIGNED state, should moveToUnassigned";
}
/**
* Moves the shard to unassigned state.
*/
void moveToUnassigned(UnassignedInfo unassignedInfo) {
version++;
assert state != ShardRoutingState.UNASSIGNED;
state = ShardRoutingState.UNASSIGNED;
currentNodeId = null;
relocatingNodeId = null;
this.unassignedInfo = unassignedInfo;
}
/**
* Assign this shard to a node.
*
* @param nodeId id of the node to assign this shard to
*/
void assignToNode(String nodeId) {
version++;
if (currentNodeId == null) {
assert state == ShardRoutingState.UNASSIGNED;
state = ShardRoutingState.INITIALIZING;
currentNodeId = nodeId;
relocatingNodeId = null;
} else if (state == ShardRoutingState.STARTED) {
state = ShardRoutingState.RELOCATING;
relocatingNodeId = nodeId;
} else if (state == ShardRoutingState.RELOCATING) {
assert nodeId.equals(relocatingNodeId);
}
}
/**
* Relocate the shard to another node.
*
* @param relocatingNodeId id of the node to relocate the shard
*/
void relocate(String relocatingNodeId) {
version++;
assert state == ShardRoutingState.STARTED;
state = ShardRoutingState.RELOCATING;
this.relocatingNodeId = relocatingNodeId;
}
/**
* Cancel relocation of a shard. The shards state must be set
* to <code>RELOCATING</code>.
*/
void cancelRelocation() {
version++;
assert state == ShardRoutingState.RELOCATING;
assert assignedToNode();
assert relocatingNodeId != null;
state = ShardRoutingState.STARTED;
relocatingNodeId = null;
}
/**
* Moves the shard from started to initializing and bumps the version
*/
void reinitializeShard() {
assert state == ShardRoutingState.STARTED;
version++;
state = ShardRoutingState.INITIALIZING;
}
/**
* Set the shards state to <code>STARTED</code>. The shards state must be
* <code>INITIALIZING</code> or <code>RELOCATING</code>. Any relocation will be
* canceled.
*/
void moveToStarted() {
version++;
assert state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING;
relocatingNodeId = null;
restoreSource = null;
state = ShardRoutingState.STARTED;
unassignedInfo = null; // we keep the unassigned data until the shard is started
}
/**
* Make the shard primary unless it's not Primary
* //TODO: doc exception
*/
void moveToPrimary() {
version++;
if (primary) {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
primary = true;
}
/**
* Set the primary shard to non-primary
*/
void moveFromPrimary() {
version++;
if (!primary) {
throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
}
primary = false;
}
private long hashVersion = version-1;
private int hashCode = 0;
@Override
public int hashCode() {
hashCode = (hashVersion != version ? super.hashCode() : hashCode);
hashVersion = version;
return hashCode;
}
}
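With MutableShardRouting deleted here and ImmutableShardRouting deleted above, the immutable/mutable split collapses into the single ShardRouting class; the IndexRoutingTable hunk earlier shows the replacement idiom, where a routing stays mutable until freeze() is called on it. A generic sketch of that freeze pattern, not the actual ShardRouting internals:

// Generic freeze idiom: mutable while being built, immutable afterwards.
class FreezableSketch {
    private boolean frozen;
    private String currentNodeId;

    void freeze() {
        frozen = true; // from here on, mutators must refuse to run
    }

    void assignToNode(String nodeId) {
        if (frozen) {
            throw new IllegalStateException("routing is frozen");
        }
        this.currentNodeId = nodeId;
    }
}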

View File

@@ -33,30 +33,30 @@ import static com.google.common.collect.Lists.newArrayList;
* A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards
* that are hosted on that node. Each {@link RoutingNode} has a unique node id that can be used to identify the node.
*/
public class RoutingNode implements Iterable<MutableShardRouting> {
public class RoutingNode implements Iterable<ShardRouting> {
private final String nodeId;
private final DiscoveryNode node;
private final List<MutableShardRouting> shards;
private final List<ShardRouting> shards;
public RoutingNode(String nodeId, DiscoveryNode node) {
this(nodeId, node, new ArrayList<MutableShardRouting>());
this(nodeId, node, new ArrayList<ShardRouting>());
}
public RoutingNode(String nodeId, DiscoveryNode node, List<MutableShardRouting> shards) {
public RoutingNode(String nodeId, DiscoveryNode node, List<ShardRouting> shards) {
this.nodeId = nodeId;
this.node = node;
this.shards = shards;
}
@Override
public Iterator<MutableShardRouting> iterator() {
public Iterator<ShardRouting> iterator() {
return Iterators.unmodifiableIterator(shards.iterator());
}
Iterator<MutableShardRouting> mutableIterator() {
Iterator<ShardRouting> mutableIterator() {
return shards.iterator();
}
@@ -85,9 +85,9 @@
* Add a new shard to this node
* @param shard Shard to create on this Node
*/
void add(MutableShardRouting shard) {
void add(ShardRouting shard) {
// TODO use Set with ShardIds for faster lookup.
for (MutableShardRouting shardRouting : shards) {
for (ShardRouting shardRouting : shards) {
if (shardRouting.shardId().equals(shard.shardId())) {
throw new IllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists");
}
@@ -102,7 +102,7 @@
*/
public int numberOfShardsWithState(ShardRoutingState... states) {
int count = 0;
for (MutableShardRouting shardEntry : this) {
for (ShardRouting shardEntry : this) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
count++;
@@ -117,9 +117,9 @@
* @param states set of states which should be listed
* @return List of shards
*/
public List<MutableShardRouting> shardsWithState(ShardRoutingState... states) {
List<MutableShardRouting> shards = newArrayList();
for (MutableShardRouting shardEntry : this) {
public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
for (ShardRouting shardEntry : this) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
shards.add(shardEntry);
@@ -135,10 +135,10 @@
* @param states set of states which should be listed
* @return a list of shards
*/
public List<MutableShardRouting> shardsWithState(String index, ShardRoutingState... states) {
List<MutableShardRouting> shards = newArrayList();
public List<ShardRouting> shardsWithState(String index, ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
for (MutableShardRouting shardEntry : this) {
for (ShardRouting shardEntry : this) {
if (!shardEntry.index().equals(index)) {
continue;
}
@@ -156,7 +156,7 @@
*/
public int numberOfOwningShards() {
int count = 0;
for (MutableShardRouting shardEntry : this) {
for (ShardRouting shardEntry : this) {
if (shardEntry.state() != ShardRoutingState.RELOCATING) {
count++;
}
@ -168,7 +168,7 @@ public class RoutingNode implements Iterable<MutableShardRouting> {
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append("-----node_id[").append(nodeId).append("][" + (node == null ? "X" : "V") + "]\n");
for (MutableShardRouting entry : shards) {
for (ShardRouting entry : shards) {
sb.append("--------").append(entry.shortSummary()).append('\n');
}
return sb.toString();
@ -190,11 +190,11 @@ public class RoutingNode implements Iterable<MutableShardRouting> {
return sb.toString();
}
public MutableShardRouting get(int i) {
public ShardRouting get(int i) {
return shards.get(i);
}
public Collection<MutableShardRouting> copyShards() {
public Collection<ShardRouting> copyShards() {
return new ArrayList<>(shards);
}
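
The refactored RoutingNode above now iterates plain ShardRouting instances. A minimal sketch of consuming that API, equivalent to calling node.numberOfShardsWithState(ShardRoutingState.STARTED); countStarted is a hypothetical helper name used only for illustration:

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;

static int countStarted(RoutingNode node) {
    int started = 0;
    for (ShardRouting shard : node) { // iterator() is unmodifiable
        if (shard.state() == ShardRoutingState.STARTED) {
            started++;
        }
    }
    return started;
}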

View File

@ -53,9 +53,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
private final UnassignedShards unassignedShards = new UnassignedShards();
private final List<MutableShardRouting> ignoredUnassignedShards = newArrayList();
private final List<ShardRouting> ignoredUnassignedShards = newArrayList();
private final Map<ShardId, List<MutableShardRouting>> assignedShards = newHashMap();
private final Map<ShardId, List<ShardRouting>> assignedShards = newHashMap();
private final ImmutableOpenMap<String, ClusterState.Custom> customs;
@ -75,10 +75,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
this.routingTable = clusterState.routingTable();
this.customs = clusterState.customs();
Map<String, List<MutableShardRouting>> nodesToShards = newHashMap();
Map<String, List<ShardRouting>> nodesToShards = newHashMap();
// fill in the nodeToShards with the "live" nodes
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
nodesToShards.put(cursor.value.id(), new ArrayList<MutableShardRouting>());
nodesToShards.put(cursor.value.id(), new ArrayList<ShardRouting>());
}
// fill in the inverse of node -> shards allocated
@ -91,12 +91,12 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// by the ShardId, as this is common for primary and replicas.
// A replica set might have at most one replica in the RELOCATING state.
if (shard.assignedToNode()) {
List<MutableShardRouting> entries = nodesToShards.get(shard.currentNodeId());
List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId());
if (entries == null) {
entries = newArrayList();
nodesToShards.put(shard.currentNodeId(), entries);
}
MutableShardRouting sr = new MutableShardRouting(shard);
ShardRouting sr = new ShardRouting(shard);
entries.add(sr);
assignedShardsAdd(sr);
if (shard.relocating()) {
@ -108,7 +108,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
// add the counterpart shard with relocatingNodeId reflecting the source
// it's relocating from.
sr = new MutableShardRouting(shard.index(), shard.id(), shard.relocatingNodeId(),
sr = new ShardRouting(shard.index(), shard.id(), shard.relocatingNodeId(),
shard.currentNodeId(), shard.restoreSource(), shard.primary(), ShardRoutingState.INITIALIZING, shard.version());
entries.add(sr);
assignedShardsAdd(sr);
@ -119,14 +119,14 @@ public class RoutingNodes implements Iterable<RoutingNode> {
inactiveShardCount++;
}
} else {
MutableShardRouting sr = new MutableShardRouting(shard);
ShardRouting sr = new ShardRouting(shard);
assignedShardsAdd(sr);
unassignedShards.add(sr);
}
}
}
}
for (Map.Entry<String, List<MutableShardRouting>> entry : nodesToShards.entrySet()) {
for (Map.Entry<String, List<ShardRouting>> entry : nodesToShards.entrySet()) {
String nodeId = entry.getKey();
this.nodesToShards.put(nodeId, new RoutingNode(nodeId, clusterState.nodes().get(nodeId), entry.getValue()));
}
@ -185,7 +185,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return !unassignedShards.isEmpty();
}
public List<MutableShardRouting> ignoredUnassigned() {
public List<ShardRouting> ignoredUnassigned() {
return this.ignoredUnassignedShards;
}
@ -258,8 +258,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Returns the active primary shard for the given ShardRouting or <code>null</code> if
* no primary is found or the primary is not active.
*/
public MutableShardRouting activePrimary(ShardRouting shard) {
for (MutableShardRouting shardRouting : assignedShards(shard.shardId())) {
public ShardRouting activePrimary(ShardRouting shard) {
for (ShardRouting shardRouting : assignedShards(shard.shardId())) {
if (shardRouting.primary() && shardRouting.active()) {
return shardRouting;
}
@ -271,8 +271,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Returns one active replica shard for the given ShardRouting shard ID or <code>null</code> if
* no active replica is found.
*/
public MutableShardRouting activeReplica(ShardRouting shard) {
for (MutableShardRouting shardRouting : assignedShards(shard.shardId())) {
public ShardRouting activeReplica(ShardRouting shard) {
for (ShardRouting shardRouting : assignedShards(shard.shardId())) {
if (!shardRouting.primary() && shardRouting.active()) {
return shardRouting;
}
@ -284,7 +284,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Returns all shards that are not in the state UNASSIGNED with the same shard
* ID as the given shard.
*/
public Iterable<MutableShardRouting> assignedShards(ShardRouting shard) {
public Iterable<ShardRouting> assignedShards(ShardRouting shard) {
return assignedShards(shard.shardId());
}
@ -292,11 +292,11 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Returns <code>true</code> iff all replicas are active for the given shard routing. Otherwise <code>false</code>
*/
public boolean allReplicasActive(ShardRouting shardRouting) {
final List<MutableShardRouting> shards = assignedShards(shardRouting.shardId());
final List<ShardRouting> shards = assignedShards(shardRouting.shardId());
if (shards.isEmpty() || shards.size() < this.routingTable.index(shardRouting.index()).shard(shardRouting.id()).size()) {
return false; // if the set is empty nothing is active; if it holds fewer than the total, at least one copy is unassigned
}
for (MutableShardRouting shard : shards) {
for (ShardRouting shard : shards) {
if (!shard.active()) {
return false;
}
@ -304,10 +304,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return true;
}
public List<MutableShardRouting> shards(Predicate<MutableShardRouting> predicate) {
List<MutableShardRouting> shards = newArrayList();
public List<ShardRouting> shards(Predicate<ShardRouting> predicate) {
List<ShardRouting> shards = newArrayList();
for (RoutingNode routingNode : this) {
for (MutableShardRouting shardRouting : routingNode) {
for (ShardRouting shardRouting : routingNode) {
if (predicate.apply(shardRouting)) {
shards.add(shardRouting);
}
@ -316,9 +316,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return shards;
}
public List<MutableShardRouting> shardsWithState(ShardRoutingState... state) {
public List<ShardRouting> shardsWithState(ShardRoutingState... state) {
// TODO these are used in tests only - move into a utils class
List<MutableShardRouting> shards = newArrayList();
List<ShardRouting> shards = newArrayList();
for (RoutingNode routingNode : this) {
shards.addAll(routingNode.shardsWithState(state));
}
@ -331,15 +331,15 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return shards;
}
public List<MutableShardRouting> shardsWithState(String index, ShardRoutingState... state) {
public List<ShardRouting> shardsWithState(String index, ShardRoutingState... state) {
// TODO these are used in tests only - move into a utils class
List<MutableShardRouting> shards = newArrayList();
List<ShardRouting> shards = newArrayList();
for (RoutingNode routingNode : this) {
shards.addAll(routingNode.shardsWithState(index, state));
}
for (ShardRoutingState s : state) {
if (s == ShardRoutingState.UNASSIGNED) {
for (MutableShardRouting unassignedShard : unassignedShards) {
for (ShardRouting unassignedShard : unassignedShards) {
if (unassignedShard.index().equals(index)) {
shards.add(unassignedShard);
}
@ -356,7 +356,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
sb.append(routingNode.prettyPrint());
}
sb.append("---- unassigned\n");
for (MutableShardRouting shardEntry : unassignedShards) {
for (ShardRouting shardEntry : unassignedShards) {
sb.append("--------").append(shardEntry.shortSummary()).append('\n');
}
return sb.toString();
@ -379,7 +379,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* @param shard the shard to be assigned
* @param nodeId the nodeId this shard should initialize on or relocate from
*/
public void assign(MutableShardRouting shard, String nodeId) {
public void assign(ShardRouting shard, String nodeId) {
// state will not change if the shard is already initializing.
ShardRoutingState oldState = shard.state();
shard.assignToNode(nodeId);
@ -400,7 +400,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Relocate a shard to another node.
*/
public void relocate(MutableShardRouting shard, String nodeId) {
public void relocate(ShardRouting shard, String nodeId) {
relocatingShards++;
shard.relocate(nodeId);
}
@ -408,7 +408,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Mark a shard as started and adjusts internal statistics.
*/
public void started(MutableShardRouting shard) {
public void started(ShardRouting shard) {
if (!shard.active() && shard.relocatingNodeId() == null) {
inactiveShardCount--;
if (shard.primary()) {
@ -424,7 +424,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Cancels the relocation of a shard. The shard must be relocating.
*/
public void cancelRelocation(MutableShardRouting shard) {
public void cancelRelocation(ShardRouting shard) {
relocatingShards--;
shard.cancelRelocation();
}
@ -434,8 +434,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*
* @param shards the shards to have their primary status swapped.
*/
public void swapPrimaryFlag(MutableShardRouting... shards) {
for (MutableShardRouting shard : shards) {
public void swapPrimaryFlag(ShardRouting... shards) {
for (ShardRouting shard : shards) {
if (shard.primary()) {
shard.moveFromPrimary();
if (shard.unassigned()) {
@ -450,10 +450,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
}
private static final List<MutableShardRouting> EMPTY = Collections.emptyList();
private static final List<ShardRouting> EMPTY = Collections.emptyList();
private List<MutableShardRouting> assignedShards(ShardId shardId) {
final List<MutableShardRouting> replicaSet = assignedShards.get(shardId);
private List<ShardRouting> assignedShards(ShardId shardId) {
final List<ShardRouting> replicaSet = assignedShards.get(shardId);
return replicaSet == null ? EMPTY : Collections.unmodifiableList(replicaSet);
}
@ -462,7 +462,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* the relocation if the shard is relocating.
* @param shard
*/
private void remove(MutableShardRouting shard) {
private void remove(ShardRouting shard) {
if (!shard.active() && shard.relocatingNodeId() == null) {
inactiveShardCount--;
assert inactiveShardCount >= 0;
@ -475,12 +475,12 @@ public class RoutingNodes implements Iterable<RoutingNode> {
assignedShardsRemove(shard);
}
private void assignedShardsAdd(MutableShardRouting shard) {
private void assignedShardsAdd(ShardRouting shard) {
if (shard.unassigned()) {
// no unassigned
return;
}
List<MutableShardRouting> shards = assignedShards.get(shard.shardId());
List<ShardRouting> shards = assignedShards.get(shard.shardId());
if (shards == null) {
shards = Lists.newArrayList();
assignedShards.put(shard.shardId(), shards);
@ -489,17 +489,17 @@ public class RoutingNodes implements Iterable<RoutingNode> {
shards.add(shard);
}
private boolean assertInstanceNotInList(MutableShardRouting shard, List<MutableShardRouting> shards) {
for (MutableShardRouting s : shards) {
private boolean assertInstanceNotInList(ShardRouting shard, List<ShardRouting> shards) {
for (ShardRouting s : shards) {
assert s != shard;
}
return true;
}
private void assignedShardsRemove(MutableShardRouting shard) {
final List<MutableShardRouting> replicaSet = assignedShards.get(shard.shardId());
private void assignedShardsRemove(ShardRouting shard) {
final List<ShardRouting> replicaSet = assignedShards.get(shard.shardId());
if (replicaSet != null) {
final Iterator<MutableShardRouting> iterator = replicaSet.iterator();
final Iterator<ShardRouting> iterator = replicaSet.iterator();
while(iterator.hasNext()) {
// yes we check identity here
if (shard == iterator.next()) {
@ -532,7 +532,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return nodesToShards.values().toArray(new RoutingNode[nodesToShards.size()]);
}
public void reinitShadowPrimary(MutableShardRouting candidate) {
public void reinitShadowPrimary(ShardRouting candidate) {
if (candidate.relocating()) {
cancelRelocation(candidate);
}
@ -542,9 +542,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
public final static class UnassignedShards implements Iterable<MutableShardRouting> {
public final static class UnassignedShards implements Iterable<ShardRouting> {
private final List<MutableShardRouting> unassigned;
private final List<ShardRouting> unassigned;
private int primaries = 0;
private long transactionId = 0;
@ -564,16 +564,16 @@ public class RoutingNodes implements Iterable<RoutingNode> {
sourceTransactionId = -1;
}
public void add(MutableShardRouting mutableShardRouting) {
if(mutableShardRouting.primary()) {
public void add(ShardRouting shardRouting) {
if(shardRouting.primary()) {
primaries++;
}
unassigned.add(mutableShardRouting);
unassigned.add(shardRouting);
transactionId++;
}
public void addAll(Collection<MutableShardRouting> mutableShardRoutings) {
for (MutableShardRouting r : mutableShardRoutings) {
public void addAll(Collection<ShardRouting> mutableShardRoutings) {
for (ShardRouting r : mutableShardRoutings) {
add(r);
}
}
@ -587,17 +587,17 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
@Override
public Iterator<MutableShardRouting> iterator() {
final Iterator<MutableShardRouting> iterator = unassigned.iterator();
return new Iterator<MutableShardRouting>() {
private MutableShardRouting current;
public Iterator<ShardRouting> iterator() {
final Iterator<ShardRouting> iterator = unassigned.iterator();
return new Iterator<ShardRouting>() {
private ShardRouting current;
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public MutableShardRouting next() {
public ShardRouting next() {
return current = iterator.next();
}
@ -639,8 +639,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return new UnassignedShards(this);
}
public MutableShardRouting[] drain() {
MutableShardRouting[] mutableShardRoutings = unassigned.toArray(new MutableShardRouting[unassigned.size()]);
public ShardRouting[] drain() {
ShardRouting[] mutableShardRoutings = unassigned.toArray(new ShardRouting[unassigned.size()]);
unassigned.clear();
primaries = 0;
transactionId++;
@ -650,7 +650,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Calculates RoutingNodes statistics by iterating over all {@link MutableShardRouting}s
* Calculates RoutingNodes statistics by iterating over all {@link ShardRouting}s
* in the cluster to ensure the book-keeping is correct.
* For performance reasons, this should only be called from asserts
*
@ -670,7 +670,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
final Set<ShardId> seenShards = newHashSet();
Map<String, Integer> indicesAndShards = new HashMap<>();
for (RoutingNode node : routingNodes) {
for (MutableShardRouting shard : node) {
for (ShardRouting shard : node) {
if (!shard.active() && shard.relocatingNodeId() == null) {
if (!shard.relocating()) {
inactiveShardCount++;
@ -692,20 +692,20 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
// Assert that the active shard routing are identical.
Set<Map.Entry<String, Integer>> entries = indicesAndShards.entrySet();
final List<MutableShardRouting> shards = newArrayList();
final List<ShardRouting> shards = newArrayList();
for (Map.Entry<String, Integer> e : entries) {
String index = e.getKey();
for (int i = 0; i < e.getValue(); i++) {
for (RoutingNode routingNode : routingNodes) {
for (MutableShardRouting shardRouting : routingNode) {
for (ShardRouting shardRouting : routingNode) {
if (shardRouting.index().equals(index) && shardRouting.id() == i) {
shards.add(shardRouting);
}
}
}
List<MutableShardRouting> mutableShardRoutings = routingNodes.assignedShards(new ShardId(index, i));
List<ShardRouting> mutableShardRoutings = routingNodes.assignedShards(new ShardId(index, i));
assert mutableShardRoutings.size() == shards.size();
for (MutableShardRouting r : mutableShardRoutings) {
for (ShardRouting r : mutableShardRoutings) {
assert shards.contains(r);
shards.remove(r);
}
@ -713,7 +713,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
}
for (MutableShardRouting shard : routingNodes.unassigned()) {
for (ShardRouting shard : routingNodes.unassigned()) {
if (shard.primary()) {
unassignedPrimaryCount++;
}
@ -731,7 +731,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
public class RoutingNodesIterator implements Iterator<RoutingNode>, Iterable<MutableShardRouting> {
public class RoutingNodesIterator implements Iterator<RoutingNode>, Iterable<ShardRouting> {
private RoutingNode current;
private final Iterator<RoutingNode> delegate;
@ -759,15 +759,15 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
@Override
public Iterator<MutableShardRouting> iterator() {
public Iterator<ShardRouting> iterator() {
return nodeShards();
}
}
public final class RoutingNodeIterator implements Iterator<MutableShardRouting>, Iterable<MutableShardRouting> {
public final class RoutingNodeIterator implements Iterator<ShardRouting>, Iterable<ShardRouting> {
private final RoutingNode iterable;
private MutableShardRouting shard;
private final Iterator<MutableShardRouting> delegate;
private ShardRouting shard;
private final Iterator<ShardRouting> delegate;
public RoutingNodeIterator(RoutingNode iterable) {
this.delegate = iterable.mutableIterator();
@ -780,7 +780,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
@Override
public MutableShardRouting next() {
public ShardRouting next() {
return shard = delegate.next();
}
@ -791,13 +791,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
@Override
public Iterator<MutableShardRouting> iterator() {
public Iterator<ShardRouting> iterator() {
return iterable.iterator();
}
public void moveToUnassigned(UnassignedInfo unassignedInfo) {
remove();
MutableShardRouting unassigned = new MutableShardRouting(shard); // protective copy of the mutable shard
ShardRouting unassigned = new ShardRouting(shard); // protective copy of the mutable shard
unassigned.moveToUnassigned(unassignedInfo);
unassigned().add(unassigned);
}
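
The UnassignedShards helper above is what the allocators work against: drain() hands back the current backlog and empties the list, while add() re-files shards and keeps the primary count and transaction id in sync. A hedged sketch of that cycle, assuming a RoutingNodes instance named routingNodes; tryToPlace is a hypothetical placement check, not part of this change:

RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
ShardRouting[] backlog = unassigned.drain(); // list is now empty, transactionId bumped
for (ShardRouting shard : backlog) {
    boolean placed = tryToPlace(shard); // hypothetical helper
    if (!placed) {
        unassigned.add(shard); // re-file for a later pass
    }
}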

View File

@ -346,7 +346,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
Map<String, IndexRoutingTable.Builder> indexRoutingTableBuilders = newHashMap();
for (RoutingNode routingNode : routingNodes) {
for (MutableShardRouting shardRoutingEntry : routingNode) {
for (ShardRouting shardRoutingEntry : routingNode) {
// every relocating shard has a double entry, ignore the target one.
if (shardRoutingEntry.state() == ShardRoutingState.INITIALIZING && shardRoutingEntry.relocatingNodeId() != null)
continue;
@ -362,7 +362,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
indexBuilder.addShard(refData, shardRoutingEntry);
}
}
for (MutableShardRouting shardRoutingEntry : Iterables.concat(routingNodes.unassigned(), routingNodes.ignoredUnassigned())) {
for (ShardRouting shardRoutingEntry : Iterables.concat(routingNodes.unassigned(), routingNodes.ignoredUnassigned())) {
String index = shardRoutingEntry.index();
IndexRoutingTable.Builder indexBuilder = indexRoutingTableBuilders.get(index);
if (indexBuilder == null) {
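
The rebuild above relies on the double-entry convention for relocations noted in the comment: a relocating shard appears twice in RoutingNodes, once as RELOCATING on the source node and once as INITIALIZING on the target with relocatingNodeId pointing back at the source. A minimal check for the target copy, matching the skip condition above (hypothetical helper for illustration):

static boolean isRelocationTarget(ShardRouting shard) {
    return shard.state() == ShardRoutingState.INITIALIZING && shard.relocatingNodeId() != null;
}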

View File

@ -19,78 +19,142 @@
package org.elasticsearch.cluster.routing;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
/**
* Shard routing represents the state of a shard instance allocated in the cluster.
* {@link ShardRouting} immutably encapsulates information about shard
* routings like id, state, version, etc.
*/
public interface ShardRouting extends Streamable, Serializable, ToXContent {
public final class ShardRouting implements Streamable, Serializable, ToXContent {
/**
* The shard id.
*/
ShardId shardId();
protected String index;
protected int shardId;
protected String currentNodeId;
protected String relocatingNodeId;
protected boolean primary;
protected ShardRoutingState state;
protected long version;
private transient ShardId shardIdentifier;
protected RestoreSource restoreSource;
protected UnassignedInfo unassignedInfo;
private final transient List<ShardRouting> asList;
private boolean frozen = false;
ShardRouting() {
this.asList = Arrays.asList(this);
}
public ShardRouting(ShardRouting copy) {
this(copy, copy.version());
}
public ShardRouting(ShardRouting copy, long version) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo());
}
public ShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state, long version) {
this(index, shardId, currentNodeId, null, primary, state, version);
}
public ShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state, long version) {
this(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version);
}
public ShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) {
this(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, null);
}
public ShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
UnassignedInfo unassignedInfo) {
this.index = index;
this.shardId = shardId;
this.currentNodeId = currentNodeId;
this.relocatingNodeId = relocatingNodeId;
this.primary = primary;
this.state = state;
this.asList = Arrays.asList(this);
this.version = version;
this.restoreSource = restoreSource;
this.unassignedInfo = unassignedInfo;
assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
}
/**
* The index name.
*/
String index();
public String index() {
return this.index;
}
/**
* The index name.
*/
String getIndex();
public String getIndex() {
return index();
}
/**
* The shard id.
*/
int id();
public int id() {
return this.shardId;
}
/**
* The shard id.
*/
int getId();
public int getId() {
return id();
}
/**
* The routing version associated with the shard.
*/
long version();
/**
* The shard state.
*/
ShardRoutingState state();
public long version() {
return this.version;
}
/**
* The shard is unassigned (not allocated to any node).
*/
boolean unassigned();
public boolean unassigned() {
return state == ShardRoutingState.UNASSIGNED;
}
/**
* The shard is initializing (usually recovering either from peer shard
* or from gateway).
*/
boolean initializing();
/**
* The shard is in started mode.
*/
boolean started();
/**
* Returns <code>true</code> iff this shard is currently relocating to
* another node. Otherwise <code>false</code>
*
* @see ShardRoutingState#RELOCATING
*/
boolean relocating();
public boolean initializing() {
return state == ShardRoutingState.INITIALIZING;
}
/**
* Returns <code>true</code> iff this shard is currently
@ -98,61 +162,427 @@ public interface ShardRouting extends Streamable, Serializable, ToXContent {
* {@link ShardRoutingState#RELOCATING relocating} to another node.
* Otherwise <code>false</code>
*/
boolean active();
public boolean active() {
return started() || relocating();
}
/**
* The shard is in started mode.
*/
public boolean started() {
return state == ShardRoutingState.STARTED;
}
/**
* Returns <code>true</code> iff this shard is currently relocating to
* another node. Otherwise <code>false</code>
*
* @see ShardRoutingState#RELOCATING
*/
public boolean relocating() {
return state == ShardRoutingState.RELOCATING;
}
/**
* Returns <code>true</code> iff this shard is assigned to a node, i.e. not
* {@link ShardRoutingState#UNASSIGNED unassigned}. Otherwise <code>false</code>
*/
boolean assignedToNode();
public boolean assignedToNode() {
return currentNodeId != null;
}
/**
* The current node id the shard is allocated on.
*/
String currentNodeId();
public String currentNodeId() {
return this.currentNodeId;
}
/**
* The relocating node id the shard is either relocating to or relocating from.
*/
String relocatingNodeId();
public String relocatingNodeId() {
return this.relocatingNodeId;
}
/**
* If the shard is relocating, return a shard routing representing the target shard, or null otherwise.
* The target shard routing will be in the INITIALIZING state and have relocatingNodeId set to the
* source node.
*/
ShardRouting targetRoutingIfRelocating();
public ShardRouting targetRoutingIfRelocating() {
if (!relocating()) {
return null;
}
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, primary, ShardRoutingState.INITIALIZING, version);
}
/**
* Snapshot id and repository where this shard is being restored from
*/
RestoreSource restoreSource();
public RestoreSource restoreSource() {
return restoreSource;
}
/**
* Additional metadata on why the shard is/was unassigned. The metadata is kept around
* until the shard moves to STARTED.
*/
UnassignedInfo unassignedInfo();
@Nullable
public UnassignedInfo unassignedInfo() {
return unassignedInfo;
}
/**
* Returns <code>true</code> iff this shard is a primary.
*/
boolean primary();
public boolean primary() {
return this.primary;
}
/**
* A short description of the shard.
* The shard state.
*/
String shortSummary();
public ShardRoutingState state() {
return this.state;
}
/**
* The shard id.
*/
public ShardId shardId() {
if (shardIdentifier != null) {
return shardIdentifier;
}
shardIdentifier = new ShardId(index, shardId);
return shardIdentifier;
}
/**
* A shard iterator with just this shard in it.
*/
ShardIterator shardsIt();
public ShardIterator shardsIt() {
return new PlainShardIterator(shardId(), asList);
}
public static ShardRouting readShardRoutingEntry(StreamInput in) throws IOException {
ShardRouting entry = new ShardRouting();
entry.readFrom(in);
return entry;
}
public static ShardRouting readShardRoutingEntry(StreamInput in, String index, int shardId) throws IOException {
ShardRouting entry = new ShardRouting();
entry.readFrom(in, index, shardId);
return entry;
}
public void readFrom(StreamInput in, String index, int shardId) throws IOException {
this.index = index;
this.shardId = shardId;
readFromThin(in);
}
public void readFromThin(StreamInput in) throws IOException {
version = in.readLong();
if (in.readBoolean()) {
currentNodeId = in.readString();
}
if (in.readBoolean()) {
relocatingNodeId = in.readString();
}
primary = in.readBoolean();
state = ShardRoutingState.fromValue(in.readByte());
restoreSource = RestoreSource.readOptionalRestoreSource(in);
if (in.readBoolean()) {
unassignedInfo = new UnassignedInfo(in);
}
freeze();
}
@Override
public void readFrom(StreamInput in) throws IOException {
readFrom(in, in.readString(), in.readVInt());
}
/**
* Does not write index name and shard id
* Writes shard information to {@link StreamOutput} without writing index name and shard id
*
* @param out {@link StreamOutput} to write shard information to
* @throws IOException if an I/O error occurs during the write
*/
void writeToThin(StreamOutput out) throws IOException;
public void writeToThin(StreamOutput out) throws IOException {
out.writeLong(version);
if (currentNodeId != null) {
out.writeBoolean(true);
out.writeString(currentNodeId);
} else {
out.writeBoolean(false);
}
void readFromThin(StreamInput in) throws ClassNotFoundException, IOException;
if (relocatingNodeId != null) {
out.writeBoolean(true);
out.writeString(relocatingNodeId);
} else {
out.writeBoolean(false);
}
out.writeBoolean(primary);
out.writeByte(state.value());
if (restoreSource != null) {
out.writeBoolean(true);
restoreSource.writeTo(out);
} else {
out.writeBoolean(false);
}
if (unassignedInfo != null) {
out.writeBoolean(true);
unassignedInfo.writeTo(out);
} else {
out.writeBoolean(false);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
out.writeVInt(shardId);
writeToThin(out);
}
// package private mutators start here
/**
* Moves the shard to unassigned state.
*/
void moveToUnassigned(UnassignedInfo unassignedInfo) {
ensureNotFrozen();
version++;
assert state != ShardRoutingState.UNASSIGNED;
state = ShardRoutingState.UNASSIGNED;
currentNodeId = null;
relocatingNodeId = null;
this.unassignedInfo = unassignedInfo;
}
/**
* Assign this shard to a node.
*
* @param nodeId id of the node to assign this shard to
*/
void assignToNode(String nodeId) {
ensureNotFrozen();
version++;
if (currentNodeId == null) {
assert state == ShardRoutingState.UNASSIGNED;
state = ShardRoutingState.INITIALIZING;
currentNodeId = nodeId;
relocatingNodeId = null;
} else if (state == ShardRoutingState.STARTED) {
state = ShardRoutingState.RELOCATING;
relocatingNodeId = nodeId;
} else if (state == ShardRoutingState.RELOCATING) {
assert nodeId.equals(relocatingNodeId);
}
}
/**
* Relocate the shard to another node.
*
* @param relocatingNodeId id of the node to relocate the shard
*/
void relocate(String relocatingNodeId) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.STARTED;
state = ShardRoutingState.RELOCATING;
this.relocatingNodeId = relocatingNodeId;
}
/**
* Cancel relocation of a shard. The shard's state must be set
* to <code>RELOCATING</code>.
*/
void cancelRelocation() {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.RELOCATING;
assert assignedToNode();
assert relocatingNodeId != null;
state = ShardRoutingState.STARTED;
relocatingNodeId = null;
}
/**
* Moves the shard from started to initializing and bumps the version
*/
void reinitializeShard() {
ensureNotFrozen();
assert state == ShardRoutingState.STARTED;
version++;
state = ShardRoutingState.INITIALIZING;
}
/**
* Set the shard's state to <code>STARTED</code>. The shard's state must be
* <code>INITIALIZING</code> or <code>RELOCATING</code>. Any relocation will be
* canceled.
*/
void moveToStarted() {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING;
relocatingNodeId = null;
restoreSource = null;
state = ShardRoutingState.STARTED;
unassignedInfo = null; // we keep the unassigned data until the shard is started
}
/**
* Make the shard a primary, throwing an {@link IllegalShardRoutingStateException} if it already is one.
*/
void moveToPrimary() {
ensureNotFrozen();
version++;
if (primary) {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
primary = true;
}
/**
* Set the primary shard to non-primary
*/
void moveFromPrimary() {
ensureNotFrozen();
version++;
if (!primary) {
throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
}
primary = false;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
// we check with instanceof so we also handle the ImmutableShardRouting case
if (o == null || !(o instanceof ShardRouting)) {
return false;
}
ShardRouting that = (ShardRouting) o;
if (primary != that.primary) {
return false;
}
if (shardId != that.shardId) {
return false;
}
if (currentNodeId != null ? !currentNodeId.equals(that.currentNodeId) : that.currentNodeId != null) {
return false;
}
if (index != null ? !index.equals(that.index) : that.index != null) {
return false;
}
if (relocatingNodeId != null ? !relocatingNodeId.equals(that.relocatingNodeId) : that.relocatingNodeId != null) {
return false;
}
if (state != that.state) {
return false;
}
if (restoreSource != null ? !restoreSource.equals(that.restoreSource) : that.restoreSource != null) {
return false;
}
return true;
}
private long hashVersion = version-1;
private int hashCode = 0;
@Override
public int hashCode() {
if (hashVersion == version) {
return hashCode;
}
int result = index != null ? index.hashCode() : 0;
result = 31 * result + shardId;
result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0);
result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
result = 31 * result + (state != null ? state.hashCode() : 0);
result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
return hashCode = result;
}
@Override
public String toString() {
return shortSummary();
}
/**
* A short description of the shard.
*/
public String shortSummary() {
StringBuilder sb = new StringBuilder();
sb.append('[').append(index).append(']').append('[').append(shardId).append(']');
sb.append(", node[").append(currentNodeId).append("], ");
if (relocatingNodeId != null) {
sb.append("relocating [").append(relocatingNodeId).append("], ");
}
if (primary) {
sb.append("[P]");
} else {
sb.append("[R]");
}
if (this.restoreSource != null) {
sb.append(", restoring[" + restoreSource + "]");
}
sb.append(", s[").append(state).append("]");
if (this.unassignedInfo != null) {
sb.append(", ").append(unassignedInfo.toString());
}
return sb.toString();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject()
.field("state", state())
.field("primary", primary())
.field("node", currentNodeId())
.field("relocating_node", relocatingNodeId())
.field("shard", shardId().id())
.field("index", shardId().index().name());
if (restoreSource() != null) {
builder.field("restore_source");
restoreSource().toXContent(builder, params);
}
if (unassignedInfo != null) {
unassignedInfo.toXContent(builder, params);
}
return builder.endObject();
}
private void ensureNotFrozen() {
if (frozen) {
throw new IllegalStateException("ShardRouting can't be modified anymore - already frozen");
}
}
void freeze() {
frozen = true;
}
boolean isFrozen() {
return frozen;
}
}
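
With ShardRouting now a concrete Streamable class, a round-trip through the wire format shown above can be sketched directly. BytesStreamOutput and StreamInput.wrap(...) are assumed from this codebase's common.io.stream package; note that equals() deliberately ignores version, and that readFromThin() freezes the deserialized copy:

BytesStreamOutput out = new BytesStreamOutput();
ShardRouting routing = new ShardRouting("test", 0, "node_1", true, ShardRoutingState.STARTED, 1);
routing.writeTo(out); // index + shard id, then the "thin" form
ShardRouting copy = ShardRouting.readShardRoutingEntry(StreamInput.wrap(out.bytes()));
assert copy.equals(routing); // version is not part of equality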

View File

@ -194,7 +194,7 @@ public class AllocationService extends AbstractComponent {
boolean changed = false;
// create a copy of the shards interleaving between nodes, and check if they can remain
List<MutableShardRouting> shards = new ArrayList<>();
List<ShardRouting> shards = new ArrayList<>();
int index = 0;
boolean found = true;
final RoutingNodes routingNodes = allocation.routingNodes();
@ -210,7 +210,7 @@ public class AllocationService extends AbstractComponent {
index++;
}
for (int i = 0; i < shards.size(); i++) {
MutableShardRouting shardRouting = shards.get(i);
ShardRouting shardRouting = shards.get(i);
// we can only move started shards...
if (!shardRouting.started()) {
continue;
@ -240,9 +240,9 @@ public class AllocationService extends AbstractComponent {
// go over and remove dangling replicas that are initializing for primary shards
List<ShardRouting> shardsToFail = Lists.newArrayList();
for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
for (ShardRouting shardEntry : routingNodes.unassigned()) {
if (shardEntry.primary()) {
for (MutableShardRouting routing : routingNodes.assignedShards(shardEntry)) {
for (ShardRouting routing : routingNodes.assignedShards(shardEntry)) {
if (!routing.primary() && routing.initializing()) {
shardsToFail.add(routing);
}
@ -257,9 +257,9 @@ public class AllocationService extends AbstractComponent {
// now, go over and elect a new primary if possible; note, from this code block on, if one is elected,
// routingNodes.hasUnassignedPrimaries() will potentially be false
for (MutableShardRouting shardEntry : routingNodes.unassigned()) {
for (ShardRouting shardEntry : routingNodes.unassigned()) {
if (shardEntry.primary()) {
MutableShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
if (candidate != null) {
IndexMetaData index = allocation.metaData().index(candidate.index());
routingNodes.swapPrimaryFlag(shardEntry, candidate);
@ -268,7 +268,7 @@ public class AllocationService extends AbstractComponent {
// it's also relocating, make sure to move the other routing to primary
RoutingNode node = routingNodes.node(candidate.relocatingNodeId());
if (node != null) {
for (MutableShardRouting shardRouting : node) {
for (ShardRouting shardRouting : node) {
if (shardRouting.shardId().equals(candidate.shardId()) && !shardRouting.primary()) {
routingNodes.swapPrimaryFlag(shardRouting);
break;
@ -312,7 +312,7 @@ public class AllocationService extends AbstractComponent {
changed = true;
// now, go over all the shards routing on the node, and fail them
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]");
for (MutableShardRouting shardRouting : node.copyShards()) {
for (ShardRouting shardRouting : node.copyShards()) {
applyFailedShard(allocation, shardRouting, false, unassignedInfo);
}
// it's a dead node, remove it; note, it's important to remove it *after* we apply the failed shard
@ -333,7 +333,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId());
if (currentRoutingNode != null) {
for (MutableShardRouting shard : currentRoutingNode) {
for (ShardRouting shard : currentRoutingNode) {
if (shard.shardId().equals(startedShard.shardId())) {
relocatingNodeId = shard.relocatingNodeId();
if (!shard.started()) {
@ -356,7 +356,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(relocatingNodeId);
if (sourceRoutingNode != null) {
while (sourceRoutingNode.hasNext()) {
MutableShardRouting shard = sourceRoutingNode.next();
ShardRouting shard = sourceRoutingNode.next();
if (shard.shardId().equals(startedShard.shardId())) {
if (shard.relocating()) {
dirty = true;
@ -377,7 +377,7 @@ public class AllocationService extends AbstractComponent {
private boolean applyFailedShard(RoutingAllocation allocation, ShardRouting failedShard, boolean addToIgnoreList, UnassignedInfo unassignedInfo) {
// create a copy of the failed shard, since we assume we can change possible references to it without
// changing the state of the failed shard
failedShard = new ImmutableShardRouting(failedShard);
failedShard = new ShardRouting(failedShard);
IndexRoutingTable indexRoutingTable = allocation.routingTable().index(failedShard.index());
if (indexRoutingTable == null) {
@ -394,7 +394,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
if (initializingNode != null) {
while (initializingNode.hasNext()) {
MutableShardRouting shardRouting = initializingNode.next();
ShardRouting shardRouting = initializingNode.next();
if (shardRouting.equals(failedShard)) {
dirty = true;
initializingNode.remove();
@ -411,7 +411,7 @@ public class AllocationService extends AbstractComponent {
// now, find the node that we are relocating *from*, and cancel its relocation
RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId());
if (relocatingFromNode != null) {
for (MutableShardRouting shardRouting : relocatingFromNode) {
for (ShardRouting shardRouting : relocatingFromNode) {
if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.relocating()) {
dirty = true;
routingNodes.cancelRelocation(shardRouting);
@ -431,7 +431,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes.RoutingNodeIterator relocatingFromNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
if (relocatingFromNode != null) {
while (relocatingFromNode.hasNext()) {
MutableShardRouting shardRouting = relocatingFromNode.next();
ShardRouting shardRouting = relocatingFromNode.next();
if (shardRouting.equals(failedShard)) {
dirty = true;
if (addToIgnoreList) {
@ -448,7 +448,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId());
if (initializingNode != null) {
while (initializingNode.hasNext()) {
MutableShardRouting shardRouting = initializingNode.next();
ShardRouting shardRouting = initializingNode.next();
if (shardRouting.shardId().equals(failedShard.shardId()) && shardRouting.state() == INITIALIZING) {
dirty = true;
initializingNode.remove();
@ -466,7 +466,7 @@ public class AllocationService extends AbstractComponent {
RoutingNodes.RoutingNodeIterator node = routingNodes.routingNodeIter(failedShard.currentNodeId());
if (node != null) {
while (node.hasNext()) {
MutableShardRouting shardRouting = node.next();
ShardRouting shardRouting = node.next();
if (shardRouting.equals(failedShard)) {
dirty = true;
if (addToIgnoreList) {
@ -477,9 +477,9 @@ public class AllocationService extends AbstractComponent {
// so we give a chance for other allocations and won't create poison failed allocations
// that can keep other shards from being allocated (because of limits applied on how many
// shards we can start per node)
List<MutableShardRouting> shardsToMove = Lists.newArrayList();
for (Iterator<MutableShardRouting> unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) {
MutableShardRouting unassignedShardRouting = unassignedIt.next();
List<ShardRouting> shardsToMove = Lists.newArrayList();
for (Iterator<ShardRouting> unassignedIt = routingNodes.unassigned().iterator(); unassignedIt.hasNext(); ) {
ShardRouting unassignedShardRouting = unassignedIt.next();
if (unassignedShardRouting.shardId().equals(failedShard.shardId())) {
unassignedIt.remove();
shardsToMove.add(unassignedShardRouting);
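
The primary-election pass above is worth isolating: when an unassigned shard is a primary, an active replica (if any) is promoted in its place via swapPrimaryFlag. Reduced to its skeleton, with routingNodes assumed in scope:

for (ShardRouting shardEntry : routingNodes.unassigned()) {
    if (shardEntry.primary()) {
        ShardRouting candidate = routingNodes.activeReplica(shardEntry);
        if (candidate != null) {
            routingNodes.swapPrimaryFlag(shardEntry, candidate); // replica becomes the primary
        }
    }
}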

View File

@ -23,7 +23,7 @@ import com.google.common.base.Predicate;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -118,7 +118,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
@Override
public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.move(shardRouting, node);
}
@ -227,9 +227,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
private final float threshold;
private final MetaData metaData;
private final Predicate<MutableShardRouting> assignedFilter = new Predicate<MutableShardRouting>() {
private final Predicate<ShardRouting> assignedFilter = new Predicate<ShardRouting>() {
@Override
public boolean apply(MutableShardRouting input) {
public boolean apply(ShardRouting input) {
return input.assignedToNode();
}
};
@ -476,7 +476,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
*
* @return <code>true</code> iff the shard has successfully been moved.
*/
public boolean move(MutableShardRouting shard, RoutingNode node ) {
public boolean move(ShardRouting shard, RoutingNode node ) {
if (nodes.isEmpty() || !shard.started()) {
/* with no nodes or a shard that is not started this is pointless */
return false;
@ -508,7 +508,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
Decision decision = allocation.deciders().canAllocate(shard, target, allocation);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard);
final MutableShardRouting initializingShard = new MutableShardRouting(shard.index(), shard.id(), currentNode.getNodeId(),
final ShardRouting initializingShard = new ShardRouting(shard.index(), shard.id(), currentNode.getNodeId(),
shard.currentNodeId(), shard.restoreSource(), shard.primary(), INITIALIZING, shard.version() + 1);
currentNode.addShard(initializingShard, decision);
routingNodes.assign(initializingShard, target.nodeId());
@ -534,8 +534,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* on the target node which we respect during the allocation / balancing
* process. In short, this method recreates the status-quo in the cluster.
*/
private void buildModelFromAssigned(Iterable<MutableShardRouting> shards) {
for (MutableShardRouting shard : shards) {
private void buildModelFromAssigned(Iterable<ShardRouting> shards) {
for (ShardRouting shard : shards) {
assert shard.assignedToNode();
/* we skip relocating shards here since we expect an initializing shard with the same id coming in */
if (shard.state() == RELOCATING) {
@ -554,7 +554,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* Allocates all given shards on the minimal eligible node for the shard's index
* with respect to the weight function. All given shards must be unassigned.
*/
private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned, List<MutableShardRouting> ignoredUnassigned) {
private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned, List<ShardRouting> ignoredUnassigned) {
assert !nodes.isEmpty();
if (logger.isTraceEnabled()) {
logger.trace("Start allocating unassigned shards");
@ -569,10 +569,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* use the sorter to save some iterations.
*/
final AllocationDeciders deciders = allocation.deciders();
final Comparator<MutableShardRouting> comparator = new Comparator<MutableShardRouting>() {
final Comparator<ShardRouting> comparator = new Comparator<ShardRouting>() {
@Override
public int compare(MutableShardRouting o1,
MutableShardRouting o2) {
public int compare(ShardRouting o1,
ShardRouting o2) {
if (o1.primary() ^ o2.primary()) {
return o1.primary() ? -1 : o2.primary() ? 1 : 0;
}
@ -591,15 +591,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
* the next replica. If we could not find a node to allocate (0,R,IDX1) we move all its replicas to ignoredUnassigned.
*/
MutableShardRouting[] primary = unassigned.drain();
MutableShardRouting[] secondary = new MutableShardRouting[primary.length];
ShardRouting[] primary = unassigned.drain();
ShardRouting[] secondary = new ShardRouting[primary.length];
int secondaryLength = 0;
int primaryLength = primary.length;
ArrayUtil.timSort(primary, comparator);
final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<ModelNode, Boolean>());
do {
for (int i = 0; i < primaryLength; i++) {
MutableShardRouting shard = primary[i];
ShardRouting shard = primary[i];
if (!shard.primary()) {
boolean drop = deciders.canAllocate(shard, allocation).type() == Type.NO;
if (drop) {
@ -717,7 +717,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
primaryLength = secondaryLength;
MutableShardRouting[] tmp = primary;
ShardRouting[] tmp = primary;
primary = secondary;
secondary = tmp;
secondaryLength = 0;
@ -740,11 +740,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
minNode.getNodeId());
}
final RoutingNode node = routingNodes.node(minNode.getNodeId());
MutableShardRouting candidate = null;
ShardRouting candidate = null;
final AllocationDeciders deciders = allocation.deciders();
/* make a copy since we modify this list in the loop */
final ArrayList<MutableShardRouting> shards = new ArrayList<>(index.getAllShards());
for (MutableShardRouting shard : shards) {
final ArrayList<ShardRouting> shards = new ArrayList<>(index.getAllShards());
for (ShardRouting shard : shards) {
if (shard.started()) {
// skip initializing, unassigned and relocating shards we can't relocate them anyway
Decision allocationDecision = deciders.canAllocate(shard, node, allocation);
@ -783,7 +783,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
/* now allocate on the cluster - if we are started we need to relocate the shard */
if (candidate.started()) {
RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
routingNodes.assign(new MutableShardRouting(candidate.index(), candidate.id(), lowRoutingNode.nodeId(), candidate
routingNodes.assign(new ShardRouting(candidate.index(), candidate.id(), lowRoutingNode.nodeId(), candidate
.currentNodeId(), candidate.restoreSource(), candidate.primary(), INITIALIZING, candidate.version() + 1), lowRoutingNode.nodeId());
routingNodes.relocate(candidate, lowRoutingNode.nodeId());
@ -839,8 +839,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return index == null ? 0 : index.numShards();
}
public Collection<MutableShardRouting> shards() {
Collection<MutableShardRouting> result = new ArrayList<>();
public Collection<ShardRouting> shards() {
Collection<ShardRouting> result = new ArrayList<>();
for (ModelIndex index : indices.values()) {
result.addAll(index.getAllShards());
}
@ -855,7 +855,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return -1;
}
public void addShard(MutableShardRouting shard, Decision decision) {
public void addShard(ShardRouting shard, Decision decision) {
numShards = -1;
ModelIndex index = indices.get(shard.index());
if (index == null) {
@ -865,7 +865,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
index.addShard(shard, decision);
}
public Decision removeShard(MutableShardRouting shard) {
public Decision removeShard(ShardRouting shard) {
numShards = -1;
ModelIndex index = indices.get(shard.index());
Decision removed = null;
@ -890,7 +890,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return indices.values().iterator();
}
public boolean containsShard(MutableShardRouting shard) {
public boolean containsShard(ShardRouting shard) {
ModelIndex index = getIndex(shard.getIndex());
return index == null ? false : index.containsShard(shard);
}
@ -899,7 +899,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
static final class ModelIndex {
private final String id;
private final Map<MutableShardRouting, Decision> shards = new HashMap<>();
private final Map<ShardRouting, Decision> shards = new HashMap<>();
private int numPrimaries = -1;
private int highestPrimary = -1;
@ -910,7 +910,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
public int highestPrimary() {
if (highestPrimary == -1) {
int maxId = -1;
for (MutableShardRouting shard : shards.keySet()) {
for (ShardRouting shard : shards.keySet()) {
if (shard.primary()) {
maxId = Math.max(maxId, shard.id());
}
@ -924,7 +924,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return id;
}
public Decision getDecicion(MutableShardRouting shard) {
public Decision getDecicion(ShardRouting shard) {
return shards.get(shard);
}
@ -932,14 +932,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return shards.size();
}
public Collection<MutableShardRouting> getAllShards() {
public Collection<ShardRouting> getAllShards() {
return shards.keySet();
}
public int numPrimaries() {
if (numPrimaries == -1) {
int num = 0;
for (MutableShardRouting shard : shards.keySet()) {
for (ShardRouting shard : shards.keySet()) {
if (shard.primary()) {
num++;
}
@ -949,19 +949,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return numPrimaries;
}
public Decision removeShard(MutableShardRouting shard) {
public Decision removeShard(ShardRouting shard) {
highestPrimary = numPrimaries = -1;
return shards.remove(shard);
}
public void addShard(MutableShardRouting shard, Decision decision) {
public void addShard(ShardRouting shard, Decision decision) {
highestPrimary = numPrimaries = -1;
assert decision != null;
assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
shards.put(shard, decision);
}
public boolean containsShard(MutableShardRouting shard) {
public boolean containsShard(ShardRouting shard) {
return shards.containsKey(shard);
}
}
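
The allocateUnassigned loop above follows a drain/swap pattern that is easy to miss in the diff: unassigned shards are drained into a primary array (sorted primaries-first), anything that must wait is parked in a secondary array, then the arrays are swapped and the pass repeats. The control flow, reduced to a skeleton with the placement logic elided:

ShardRouting[] primary = unassigned.drain();
ShardRouting[] secondary = new ShardRouting[primary.length];
int primaryLength = primary.length;
int secondaryLength = 0;
do {
    for (int i = 0; i < primaryLength; i++) {
        ShardRouting shard = primary[i];
        // ... try to place shard; on deferral: secondary[secondaryLength++] = shard;
    }
    primaryLength = secondaryLength; // retry only the deferred shards
    ShardRouting[] tmp = primary;
    primary = secondary;
    secondary = tmp;
    secondaryLength = 0;
} while (primaryLength > 0);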

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -74,5 +73,5 @@ public interface ShardsAllocator {
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation);
boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation);
}
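
A hedged sketch of the refactored move contract: implementations return true only when they actually changed the allocation. A trivial decline-everything body, shown as a fragment since a real ShardsAllocator must implement the interface's other methods as well:

@Override
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    return false; // nothing moved, so the allocation is unchanged
}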

View File

@ -19,7 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -80,7 +80,7 @@ public class ShardsAllocators extends AbstractComponent implements ShardsAllocat
}
@Override
public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return allocator.move(shardRouting, node, allocation);
}
}

View File

@ -19,10 +19,9 @@
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -168,8 +167,8 @@ public class AllocateAllocationCommand implements AllocationCommand {
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
MutableShardRouting shardRouting = null;
for (MutableShardRouting routing : allocation.routingNodes().unassigned()) {
ShardRouting shardRouting = null;
for (ShardRouting routing : allocation.routingNodes().unassigned()) {
if (routing.shardId().equals(shardId)) {
// prefer to allocate primaries first
if (shardRouting == null || routing.primary()) {
@ -219,7 +218,7 @@ public class AllocateAllocationCommand implements AllocationCommand {
throw new IllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision);
}
// go over and remove it from the unassigned
for (Iterator<MutableShardRouting> it = allocation.routingNodes().unassigned().iterator(); it.hasNext(); ) {
for (Iterator<ShardRouting> it = allocation.routingNodes().unassigned().iterator(); it.hasNext(); ) {
if (it.next() != shardRouting) {
continue;
}
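
Stripped of the diff noise, the resolution loop above picks the unassigned routing for the requested shard id and lets a primary copy win over a replica:

ShardRouting shardRouting = null;
for (ShardRouting routing : allocation.routingNodes().unassigned()) {
    if (routing.shardId().equals(shardId)) {
        if (shardRouting == null || routing.primary()) {
            shardRouting = routing; // primaries take precedence
        }
    }
}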

View File

@ -164,7 +164,7 @@ public class CancelAllocationCommand implements AllocationCommand {
DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
boolean found = false;
for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.id()); it.hasNext(); ) {
MutableShardRouting shardRouting = it.next();
ShardRouting shardRouting = it.next();
if (!shardRouting.shardId().equals(shardId)) {
continue;
}
@ -176,7 +176,7 @@ public class CancelAllocationCommand implements AllocationCommand {
// and cancel the relocating state from the shard it's being relocated from

RoutingNode relocatingFromNode = allocation.routingNodes().node(shardRouting.relocatingNodeId());
if (relocatingFromNode != null) {
for (MutableShardRouting fromShardRouting : relocatingFromNode) {
for (ShardRouting fromShardRouting : relocatingFromNode) {
if (fromShardRouting.shardId().equals(shardRouting.shardId()) && fromShardRouting.state() == RELOCATING) {
allocation.routingNodes().cancelRelocation(fromShardRouting);
break;
@ -200,7 +200,7 @@ public class CancelAllocationCommand implements AllocationCommand {
RoutingNodes.RoutingNodeIterator initializingNode = allocation.routingNodes().routingNodeIter(shardRouting.relocatingNodeId());
if (initializingNode != null) {
while (initializingNode.hasNext()) {
MutableShardRouting initializingShardRouting = initializingNode.next();
ShardRouting initializingShardRouting = initializingNode.next();
if (initializingShardRouting.shardId().equals(shardRouting.shardId()) && initializingShardRouting.state() == INITIALIZING) {
initializingNode.remove();
}
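
Both branches in this file follow the same rule: cancelling one side of a relocation must clean up the other side. A hedged restatement of the source-node case, using the names from the hunk:

    // the copy being cancelled is the relocation *target*, so walk the source
    // node and clear the RELOCATING state there, letting the shard settle back
    for (ShardRouting fromShardRouting : relocatingFromNode) {
        if (fromShardRouting.shardId().equals(shardRouting.shardId())
                && fromShardRouting.state() == RELOCATING) {
            allocation.routingNodes().cancelRelocation(fromShardRouting);
            break;
        }
    }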

View File

@ -19,10 +19,9 @@
package org.elasticsearch.cluster.routing.allocation.command;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
@ -152,7 +151,7 @@ public class MoveAllocationCommand implements AllocationCommand {
Decision decision = null;
boolean found = false;
for (MutableShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.id())) {
for (ShardRouting shardRouting : allocation.routingNodes().node(fromDiscoNode.id())) {
if (!shardRouting.shardId().equals(shardId)) {
continue;
}
@ -180,7 +179,7 @@ public class MoveAllocationCommand implements AllocationCommand {
// it's being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it...
}
allocation.routingNodes().assign(new MutableShardRouting(shardRouting.index(), shardRouting.id(),
allocation.routingNodes().assign(new ShardRouting(shardRouting.index(), shardRouting.id(),
toRoutingNode.nodeId(), shardRouting.currentNodeId(), shardRouting.restoreSource(),
shardRouting.primary(), ShardRoutingState.INITIALIZING, shardRouting.version() + 1), toRoutingNode.nodeId());
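
The assign call above packs the whole relocation handoff into one constructor call. Annotated with the argument roles (the eight-argument signature is taken verbatim from the hunk):

    allocation.routingNodes().assign(new ShardRouting(
            shardRouting.index(), shardRouting.id(),
            toRoutingNode.nodeId(),          // node the shard is moving to
            shardRouting.currentNodeId(),    // node it is relocating from
            shardRouting.restoreSource(),
            shardRouting.primary(),
            ShardRoutingState.INITIALIZING,  // the target copy starts as a recovery
            shardRouting.version() + 1),     // new routing entry carries a higher version
            toRoutingNode.nodeId());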

View File

@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.google.common.collect.Maps;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -185,7 +184,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
// build the count of shards per attribute value
ObjectIntHashMap<String> shardPerAttribute = new ObjectIntHashMap<>();
for (MutableShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting)) {
for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting)) {
// if the shard is relocating, then make sure we count it as part of the node it is relocating to
if (assignedShard.relocating()) {
RoutingNode relocationNode = allocation.routingNodes().node(assignedShard.relocatingNodeId());
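
For context, the counting that follows this hunk groups every assigned copy of the shard by the value of an awareness attribute (for example rack_id) on the node that will end up holding it. A sketch under that assumption; the attribute key is illustrative and the DiscoveryNode attributes() accessor is assumed:

    ObjectIntHashMap<String> shardPerAttribute = new ObjectIntHashMap<>();
    for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting)) {
        // a relocating copy counts against its destination node
        String nodeId = assignedShard.relocating()
                ? assignedShard.relocatingNodeId()
                : assignedShard.currentNodeId();
        RoutingNode assignedNode = allocation.routingNodes().node(nodeId);
        shardPerAttribute.addTo(assignedNode.node().attributes().get("rack_id"), 1);
    }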

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -53,7 +52,7 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
// we are the primary - we can allocate anywhere
return allocation.decision(Decision.YES, NAME, "primary shard can be allocated anywhere");
}
final MutableShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
if (primary == null) { // no active primary yet - nothing to compare versions against
return allocation.decision(Decision.YES, NAME, "no active primary shard yet");
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -48,7 +47,7 @@ public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecide
if (shardRouting.primary()) {
return allocation.decision(Decision.YES, NAME, "shard is primary");
}
MutableShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
if (primary == null) {
return allocation.decision(Decision.NO, NAME, "primary shard is not yet active");
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -59,8 +58,8 @@ public class SameShardAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
Iterable<MutableShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting);
for (MutableShardRouting assignedShard : assignedShards) {
Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting);
for (ShardRouting assignedShard : assignedShards) {
if (node.nodeId().equals(assignedShard.currentNodeId())) {
return allocation.decision(Decision.NO, NAME, "shard cannot be allocated on same node [%s] it already exists on", node.nodeId());
}
@ -83,7 +82,7 @@ public class SameShardAllocationDecider extends AllocationDecider {
}
}
if (checkNodeOnSameHost) {
for (MutableShardRouting assignedShard : assignedShards) {
for (ShardRouting assignedShard : assignedShards) {
if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {
return allocation.decision(Decision.NO, NAME,
"shard cannot be allocated on same host [%s] it already exists on", node.nodeId());

View File

@ -20,7 +20,6 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -71,7 +70,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
}
int nodeCount = 0;
for (MutableShardRouting nodeShard : node) {
for (ShardRouting nodeShard : node) {
if (!nodeShard.index().equals(shardRouting.index())) {
continue;
}
@ -97,7 +96,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
}
int nodeCount = 0;
for (MutableShardRouting nodeShard : node) {
for (ShardRouting nodeShard : node) {
if (!nodeShard.index().equals(shardRouting.index())) {
continue;
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -81,7 +80,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
// primary is unassigned, which means we are going to do recovery from the gateway
// count *just the primary* currently doing recovery on the node and check against concurrent_recoveries
int primariesInRecovery = 0;
for (MutableShardRouting shard : node) {
for (ShardRouting shard : node) {
// when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*
// we only count initial recoveries here, so we need to make sure that relocating node is null
if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary() && shard.relocatingNodeId() == null) {
@ -106,7 +105,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
int currentRecoveries = 0;
for (MutableShardRouting shard : node) {
for (ShardRouting shard : node) {
if (shard.state() == ShardRoutingState.INITIALIZING || shard.state() == ShardRoutingState.RELOCATING) {
currentRecoveries++;
}
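
Two different counters answer two different questions: how many primaries are doing an initial recovery, and how many recoveries of any kind the node is running. A combined sketch of both loops above:

    int primariesInRecovery = 0;
    int currentRecoveries = 0;
    for (ShardRouting shard : node) {
        // INITIALIZING with no relocation source == initial (gateway) recovery
        if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary()
                && shard.relocatingNodeId() == null) {
            primariesInRecovery++;
        }
        if (shard.state() == ShardRoutingState.INITIALIZING
                || shard.state() == ShardRoutingState.RELOCATING) {
            currentRecoveries++;
        }
    }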

View File

@ -500,28 +500,36 @@ public final class XContentBuilder implements BytesStream, Releasable {
public XContentBuilder field(String name, BigDecimal value, int scale, RoundingMode rounding, boolean toDouble) throws IOException {
field(name);
if (toDouble) {
try {
generator.writeNumber(value.setScale(scale, rounding).doubleValue());
} catch (ArithmeticException e) {
if (value == null) {
generator.writeNull();
} else {
if (toDouble) {
try {
generator.writeNumber(value.setScale(scale, rounding).doubleValue());
} catch (ArithmeticException e) {
generator.writeString(value.toEngineeringString());
}
} else {
generator.writeString(value.toEngineeringString());
}
} else {
generator.writeString(value.toEngineeringString());
}
return this;
}
public XContentBuilder field(XContentBuilderString name, BigDecimal value, int scale, RoundingMode rounding, boolean toDouble) throws IOException {
field(name);
if (toDouble) {
try {
generator.writeNumber(value.setScale(scale, rounding).doubleValue());
} catch (ArithmeticException e) {
if (value == null) {
generator.writeNull();
} else {
if (toDouble) {
try {
generator.writeNumber(value.setScale(scale, rounding).doubleValue());
} catch (ArithmeticException e) {
generator.writeString(value.toEngineeringString());
}
} else {
generator.writeString(value.toEngineeringString());
}
} else {
generator.writeString(value.toEngineeringString());
}
return this;
}
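
The rewritten methods now guard against a null BigDecimal before any scale or string conversion is attempted. A usage sketch (field name and values are hypothetical, and the surrounding method is assumed to declare throws IOException):

    XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
    builder.field("price", (BigDecimal) null, 2, RoundingMode.HALF_UP, true);        // -> "price":null
    builder.field("price", new BigDecimal("19.995"), 2, RoundingMode.HALF_UP, true); // -> "price":20.0
    builder.endObject();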

View File

@ -64,8 +64,8 @@ public class XContentHelper {
public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReference bytes, boolean ordered) throws ElasticsearchParseException {
try {
XContentParser parser;
XContentType contentType;
InputStream input;
Compressor compressor = CompressorFactory.compressor(bytes);
if (compressor != null) {
InputStream compressedStreamInput = compressor.streamInput(bytes.streamInput());
@ -73,15 +73,17 @@ public class XContentHelper {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
contentType = XContentFactory.xContentType(compressedStreamInput);
parser = XContentFactory.xContent(contentType).createParser(compressedStreamInput);
input = compressedStreamInput;
} else {
contentType = XContentFactory.xContentType(bytes);
parser = XContentFactory.xContent(contentType).createParser(bytes.streamInput());
input = bytes.streamInput();
}
if (ordered) {
return Tuple.tuple(contentType, parser.mapOrderedAndClose());
} else {
return Tuple.tuple(contentType, parser.mapAndClose());
try (XContentParser parser = XContentFactory.xContent(contentType).createParser(input)) {
if (ordered) {
return Tuple.tuple(contentType, parser.mapOrdered());
} else {
return Tuple.tuple(contentType, parser.map());
}
}
} catch (IOException e) {
throw new ElasticsearchParseException("Failed to parse content to map", e);
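
This is the same close-safety pattern applied throughout the commit: try-with-resources replaces the mapAndClose()/mapOrderedAndClose() convenience methods, so the parser is released even when parsing throws. A minimal sketch with hypothetical content, assuming the usual org.elasticsearch.common.bytes and org.elasticsearch.common.xcontent imports:

    BytesReference bytes = new BytesArray("{\"user\":\"kimchy\"}");
    XContentType contentType = XContentFactory.xContentType(bytes);
    try (XContentParser parser = XContentFactory.xContent(contentType).createParser(bytes.streamInput())) {
        Map<String, Object> map = parser.map();   // parser closed on success or failure
    }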

View File

@ -23,6 +23,7 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lease.Releasable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
@ -130,9 +131,9 @@ public interface XContentParser extends Releasable {
Map<String, Object> mapOrdered() throws IOException;
Map<String, Object> mapAndClose() throws IOException;
List<Object> list() throws IOException;
Map<String, Object> mapOrderedAndClose() throws IOException;
List<Object> listOrderedMap() throws IOException;
String text() throws IOException;
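
The interface change in two strokes: the *AndClose variants go away (closing is now the caller's job), and two list readers arrive, mirroring the existing map()/mapOrdered() pair. A hedged usage sketch with inline JSON:

    try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser("[{\"b\":1,\"a\":2}, 3, \"four\"]")) {
        parser.nextToken();                            // position on START_ARRAY
        List<Object> values = parser.listOrderedMap(); // nested objects keep key order
    }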

View File

@ -214,24 +214,15 @@ public abstract class AbstractXContentParser implements XContentParser {
}
@Override
public Map<String, Object> mapAndClose() throws IOException {
try {
return map();
} finally {
close();
}
public List<Object> list() throws IOException {
return readList(this);
}
@Override
public Map<String, Object> mapOrderedAndClose() throws IOException {
try {
return mapOrdered();
} finally {
close();
}
public List<Object> listOrderedMap() throws IOException {
return readListOrderedMap(this);
}
static interface MapFactory {
Map<String, Object> newMap();
}
@ -258,6 +249,14 @@ public abstract class AbstractXContentParser implements XContentParser {
return readMap(parser, ORDERED_MAP_FACTORY);
}
static List<Object> readList(XContentParser parser) throws IOException {
return readList(parser, SIMPLE_MAP_FACTORY);
}
static List<Object> readListOrderedMap(XContentParser parser) throws IOException {
return readList(parser, ORDERED_MAP_FACTORY);
}
static Map<String, Object> readMap(XContentParser parser, MapFactory mapFactory) throws IOException {
Map<String, Object> map = mapFactory.newMap();
XContentParser.Token token = parser.currentToken();
@ -278,15 +277,22 @@ public abstract class AbstractXContentParser implements XContentParser {
return map;
}
private static List<Object> readList(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException {
static List<Object> readList(XContentParser parser, MapFactory mapFactory) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.FIELD_NAME) {
token = parser.nextToken();
}
if (token == XContentParser.Token.START_ARRAY) {
token = parser.nextToken();
}
ArrayList<Object> list = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
for (; token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) {
list.add(readValue(parser, mapFactory, token));
}
return list;
}
private static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException {
static Object readValue(XContentParser parser, MapFactory mapFactory, XContentParser.Token token) throws IOException {
if (token == XContentParser.Token.VALUE_NULL) {
return null;
} else if (token == XContentParser.Token.VALUE_STRING) {
@ -307,7 +313,7 @@ public abstract class AbstractXContentParser implements XContentParser {
} else if (token == XContentParser.Token.START_OBJECT) {
return readMap(parser, mapFactory);
} else if (token == XContentParser.Token.START_ARRAY) {
return readList(parser, mapFactory, token);
return readList(parser, mapFactory);
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
return parser.binaryValue();
}

View File

@ -39,6 +39,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.discovery.zen.ping.PingContextProvider;
import org.elasticsearch.discovery.zen.ping.ZenPing;
@ -408,9 +409,9 @@ public class MulticastZenPing extends AbstractLifecycleComponent<ZenPing> implem
xContentType = XContentFactory.xContentType(data);
if (xContentType != null) {
// an external ping
externalPingData = XContentFactory.xContent(xContentType)
.createParser(data)
.mapAndClose();
try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(data)) {
externalPingData = parser.map();
}
} else {
throw new IllegalStateException("failed multicast message, probably message from previous version");
}

View File

@ -33,12 +33,13 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
@ -54,7 +55,6 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
/**
*
@ -148,9 +148,9 @@ public class GatewayAllocator extends AbstractComponent {
// First, handle primaries, they must find a place to be allocated on here
MetaData metaData = routingNodes.metaData();
Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
Iterator<ShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
MutableShardRouting shard = unassignedIterator.next();
ShardRouting shard = unassignedIterator.next();
if (!shard.primary()) {
continue;
@ -330,7 +330,7 @@ public class GatewayAllocator extends AbstractComponent {
// we found a match
changed = true;
// make sure we create one with the version from the recovered state
allocation.routingNodes().assign(new MutableShardRouting(shard, highestVersion), node.nodeId());
allocation.routingNodes().assign(new ShardRouting(shard, highestVersion), node.nodeId());
unassignedIterator.remove();
// found a node, so no throttling, no "no", and break out of the loop
@ -350,7 +350,7 @@ public class GatewayAllocator extends AbstractComponent {
// we found a match
changed = true;
// make sure we create one with the version from the recovered state
allocation.routingNodes().assign(new MutableShardRouting(shard, highestVersion), node.nodeId());
allocation.routingNodes().assign(new ShardRouting(shard, highestVersion), node.nodeId());
unassignedIterator.remove();
}
} else {
@ -370,7 +370,7 @@ public class GatewayAllocator extends AbstractComponent {
// Now, handle replicas, try to assign them to nodes that are similar to the one the primary was allocated on
unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
MutableShardRouting shard = unassignedIterator.next();
ShardRouting shard = unassignedIterator.next();
if (shard.primary()) {
continue;
}
@ -448,7 +448,7 @@ public class GatewayAllocator extends AbstractComponent {
if (!shard.primary()) {
hasReplicaData |= storeFilesMetaData.iterator().hasNext();
MutableShardRouting primaryShard = routingNodes.activePrimary(shard);
ShardRouting primaryShard = routingNodes.activePrimary(shard);
if (primaryShard != null) {
assert primaryShard.active();
DiscoveryNode primaryNode = nodes.get(primaryShard.currentNodeId());

View File

@ -284,7 +284,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state");
}
Set<String> indices = new HashSet<>();
for (MutableShardRouting routing : newRoutingNode) {
for (ShardRouting routing : newRoutingNode) {
indices.add(routing.index());
}
// we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously

View File

@ -36,12 +36,10 @@ import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.filter.ShardFilterCache;
import org.elasticsearch.index.deletionpolicy.DeletionPolicyModule;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.gateway.IndexShardGatewayService;
import org.elasticsearch.index.shard.StoreRecoveryService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
@ -60,10 +58,8 @@ import org.elasticsearch.plugins.ShardsPluginsModule;
import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@ -306,12 +302,16 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
final ShardFilterCache shardFilterCache = new ShardFilterCache(shardId, injector.getInstance(IndicesFilterCache.class));
ModulesBuilder modules = new ModulesBuilder();
modules.add(new ShardsPluginsModule(indexSettings, pluginsService));
modules.add(new IndexShardModule(shardId, primary, indexSettings, shardFilterCache));
modules.add(new IndexShardModule(shardId, primary, indexSettings));
modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
new StoreCloseListener(shardId, canDeleteShardContent, shardFilterCache), path));
new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {
@Override
public void close() throws IOException {
injector.getInstance(IndicesFilterCache.class).onClose(shardId);
}
}), path));
modules.add(new DeletionPolicyModule(indexSettings));
try {
shardInjector = modules.createChildInjector(injector);
@ -387,8 +387,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
}
}
closeInjectorResource(sId, shardInjector,
IndexShardGatewayService.class,
PercolatorQueriesRegistry.class);
StoreRecoveryService.class);
// call this before we close the store, so we can release resources for it
indicesLifecycle.afterIndexShardClosed(sId, indexShard, indexSettings);

View File

@ -52,20 +52,15 @@ public final class FieldNameAnalyzer extends DelegatingAnalyzerWrapper {
return defaultAnalyzer;
}
/** NOTE: public so MapperAnalyzer can invoke: */
@Override
public Analyzer getWrappedAnalyzer(String fieldName) {
return getAnalyzer(fieldName);
}
private Analyzer getAnalyzer(String name) {
Analyzer analyzer = analyzers.get(name);
protected Analyzer getWrappedAnalyzer(String fieldName) {
Analyzer analyzer = analyzers.get(fieldName);
if (analyzer != null) {
return analyzer;
}
// Don't be lenient here and return the default analyzer
// Fields need to be explicitly added
throw new IllegalArgumentException("Field [" + name + "] has no associated analyzer");
throw new IllegalArgumentException("Field [" + fieldName + "] has no associated analyzer");
}
/**

View File

@ -32,7 +32,6 @@ public class ShardBitsetFilterCache extends AbstractIndexShardComponent {
private final CounterMetric totalMetric = new CounterMetric();
@Inject
public ShardBitsetFilterCache(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}

View File

@ -1,54 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.filter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
*/
public class ShardFilterCache implements Closeable {
final IndicesFilterCache cache;
final ShardId shardId;
public ShardFilterCache(ShardId shardId, IndicesFilterCache cache) {
this.cache = cache;
this.shardId = shardId;
}
public FilterCacheStats stats() {
return cache.getStats(shardId);
}
@Override
public void close() throws IOException {
cache.onClose(shardId);
}
}

View File

@ -39,7 +39,6 @@ public class ShardQueryCache extends AbstractIndexShardComponent implements Remo
final CounterMetric hitCount = new CounterMetric();
final CounterMetric missCount = new CounterMetric();
@Inject
public ShardQueryCache(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}

View File

@ -45,7 +45,6 @@ import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
import org.elasticsearch.index.fielddata.ShardFieldData;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;

View File

@ -53,6 +53,7 @@ import java.util.concurrent.TimeUnit;
public final class EngineConfig {
private final ShardId shardId;
private final TranslogRecoveryPerformer translogRecoveryPerformer;
private final Settings indexSettings;
private volatile ByteSizeValue indexingBufferSize;
private volatile ByteSizeValue versionMapSize;
private volatile String versionMapSizeSetting;
@ -64,7 +65,6 @@ public final class EngineConfig {
private final boolean optimizeAutoGenerateId;
private final ThreadPool threadPool;
private final ShardIndexingService indexingService;
private final IndexSettingsService indexSettingsService;
@Nullable
private final IndicesWarmer warmer;
private final Store store;
@ -141,14 +141,14 @@ public final class EngineConfig {
* Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
*/
public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService,
IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy, TranslogConfig translogConfig) {
this.shardId = shardId;
this.indexSettings = indexSettings;
this.threadPool = threadPool;
this.indexingService = indexingService;
this.indexSettingsService = indexSettingsService;
this.warmer = warmer;
this.store = store;
this.deletionPolicy = deletionPolicy;
@ -158,7 +158,6 @@ public final class EngineConfig {
this.similarity = similarity;
this.codecService = codecService;
this.failedEngineListener = failedEngineListener;
Settings indexSettings = indexSettingsService.getSettings();
this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false);
this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
this.indexConcurrency = indexSettings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
@ -364,7 +363,7 @@ public final class EngineConfig {
* Returns the latest index settings directly from the index settings service.
*/
public Settings getIndexSettings() {
return indexSettingsService.getSettings();
return indexSettings;
}
/**
@ -429,9 +428,4 @@ public final class EngineConfig {
public TranslogConfig getTranslogConfig() {
return translogConfig;
}
IndexSettingsService getIndexSettingsService() { // for testing
return indexSettingsService;
}
}

View File

@ -178,8 +178,12 @@ public class InternalEngine extends Engine {
}
}
final Translog translog = new Translog(translogConfig);
if (generation == null) {
logger.debug("no translog ID present in the current generation - creating one");
if (generation == null || generation.translogUUID == null) {
if (generation == null) {
logger.debug("no translog ID present in the current generation - creating one");
} else if (generation.translogUUID == null) {
logger.debug("upgraded translog to pre 2.0 format, associating translog with index - writing translog UUID");
}
boolean success = false;
try {
commitIndexWriter(writer, translog);

View File

@ -36,18 +36,12 @@ import java.util.concurrent.ConcurrentMap;
/**
*/
public class ShardFieldData extends AbstractIndexShardComponent implements IndexFieldDataCache.Listener {
public class ShardFieldData implements IndexFieldDataCache.Listener {
final CounterMetric evictionsMetric = new CounterMetric();
final CounterMetric totalMetric = new CounterMetric();
final ConcurrentMap<String, CounterMetric> perFieldTotals = ConcurrentCollections.newConcurrentMap();
@Inject
public ShardFieldData(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}
public FieldDataStats stats(String... fields) {
ObjectLongHashMap<String> fieldTotals = null;
if (fields != null && fields.length > 0) {

View File

@ -1,32 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.elasticsearch.common.inject.AbstractModule;
/**
*/
public class ShardFieldDataModule extends AbstractModule {
@Override
protected void configure() {
bind(ShardFieldData.class).asEagerSingleton();
}
}

View File

@ -109,9 +109,20 @@ public class FieldsVisitor extends StoredFieldVisitor {
public void postProcess(DocumentMapper documentMapper) {
for (Map.Entry<String, List<Object>> entry : fields().entrySet()) {
FieldMapper fieldMapper = documentMapper.mappers().indexName(entry.getKey()).mapper();
String indexName = entry.getKey();
FieldMapper fieldMapper = documentMapper.mappers().getMapper(indexName);
if (fieldMapper == null) {
continue;
// it's possible the index name doesn't match the field name (legacy feature)
for (FieldMapper mapper : documentMapper.mappers()) {
if (mapper.fieldType().names().indexName().equals(indexName)) {
fieldMapper = mapper;
break;
}
}
if (fieldMapper == null) {
// no index name or full name found, so skip
continue;
}
}
List<Object> fieldValues = entry.getValue();
for (int i = 0; i < fieldValues.size(); i++) {
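
The lookup in this hunk grows a fallback: resolve the mapper by full name first, and only then scan for a matching Lucene index name, which the legacy index_name mapping option allowed to differ from the field name. Restated with comments, using the names from the hunk:

    FieldMapper fieldMapper = documentMapper.mappers().getMapper(indexName);
    if (fieldMapper == null) {
        // legacy case: the stored-field key is an index_name, not the field's full name
        for (FieldMapper mapper : documentMapper.mappers()) {
            if (mapper.fieldType().names().indexName().equals(indexName)) {
                fieldMapper = mapper;
                break;
            }
        }
        // still null -> the field no longer exists in the mapping, so it is skipped
    }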

View File

@ -1,39 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.gateway;
import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.index.shard.ShardId;
/**
* An exception marking that this recovery attempt should be ignored (since we probably already recovered).
*
*
*/
public class IgnoreGatewayRecoveryException extends IndexShardException {
public IgnoreGatewayRecoveryException(ShardId shardId, String msg) {
super(shardId, msg);
}
public IgnoreGatewayRecoveryException(ShardId shardId, String msg, Throwable cause) {
super(shardId, msg, cause);
}
}

View File

@ -1,192 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.gateway;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.recovery.RecoveryState;
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
/**
*
*/
public class IndexShardGateway extends AbstractIndexShardComponent implements Closeable {
private final MappingUpdatedAction mappingUpdatedAction;
private final IndexService indexService;
private final IndexShard indexShard;
private final TimeValue waitForMappingUpdatePostRecovery;
private final CancellableThreads cancellableThreads = new CancellableThreads();
@Inject
public IndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, MappingUpdatedAction mappingUpdatedAction,
IndexService indexService, IndexShard indexShard) {
super(shardId, indexSettings);
this.mappingUpdatedAction = mappingUpdatedAction;
this.indexService = indexService;
this.indexShard = indexShard;
this.waitForMappingUpdatePostRecovery = indexSettings.getAsTime("index.gateway.wait_for_mapping_update_post_recovery", TimeValue.timeValueSeconds(15));
}
/**
* Recovers the state of the shard from the gateway.
*/
public void recover(boolean indexShouldExists, RecoveryState recoveryState) throws IndexShardGatewayRecoveryException {
indexShard.prepareForIndexRecovery();
long version = -1;
final Map<String, Mapping> typesToUpdate;
SegmentInfos si = null;
final Store store = indexShard.store();
store.incRef();
try {
try {
store.failIfCorrupted();
try {
si = store.readLastCommittedSegmentsInfo();
} catch (Throwable e) {
String files = "_unknown_";
try {
files = Arrays.toString(store.directory().listAll());
} catch (Throwable e1) {
files += " (failure=" + ExceptionsHelper.detailedMessage(e1) + ")";
}
if (indexShouldExists) {
throw new IndexShardGatewayRecoveryException(shardId(), "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
}
}
if (si != null) {
if (indexShouldExists) {
version = si.getVersion();
} else {
// it exists in the directory, but shouldn't exist on the FS - it's a leftover (possibly dangling)
// it's a "new index create" API call, so we have to do something; better to clean it than reuse the same data
logger.trace("cleaning existing shard, shouldn't exist");
IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
writer.close();
recoveryState.getTranslog().totalOperations(0);
}
}
} catch (Throwable e) {
throw new IndexShardGatewayRecoveryException(shardId(), "failed to fetch index version after copying it over", e);
}
recoveryState.getIndex().updateVersion(version);
// since we recover from local, just fill the files and size
try {
final RecoveryState.Index index = recoveryState.getIndex();
if (si != null) {
final Directory directory = store.directory();
for (String name : Lucene.files(si)) {
long length = directory.fileLength(name);
index.addFileDetail(name, length, true);
}
}
} catch (IOException e) {
logger.debug("failed to list file details", e);
}
if (indexShouldExists == false) {
recoveryState.getTranslog().totalOperations(0);
recoveryState.getTranslog().totalOperationsOnStart(0);
}
typesToUpdate = indexShard.performTranslogRecovery();
indexShard.finalizeRecovery();
for (Map.Entry<String, Mapping> entry : typesToUpdate.entrySet()) {
validateMappingUpdate(entry.getKey(), entry.getValue());
}
indexShard.postRecovery("post recovery from gateway");
} catch (EngineException e) {
throw new IndexShardGatewayRecoveryException(shardId, "failed to recovery from gateway", e);
} finally {
store.decRef();
}
}
private void validateMappingUpdate(final String type, Mapping update) {
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Throwable> error = new AtomicReference<>();
mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), type, update, waitForMappingUpdatePostRecovery, new MappingUpdatedAction.MappingUpdateListener() {
@Override
public void onMappingUpdate() {
latch.countDown();
}
@Override
public void onFailure(Throwable t) {
latch.countDown();
error.set(t);
}
});
cancellableThreads.execute(new CancellableThreads.Interruptable() {
@Override
public void run() throws InterruptedException {
try {
if (latch.await(waitForMappingUpdatePostRecovery.millis(), TimeUnit.MILLISECONDS) == false) {
logger.debug("waited for mapping update on master for [{}], yet timed out", type);
} else {
if (error.get() != null) {
throw new IndexShardGatewayRecoveryException(shardId, "Failed to propagate mappings on master post recovery", error.get());
}
}
} catch (InterruptedException e) {
logger.debug("interrupted while waiting for mapping update");
throw e;
}
}
});
}
@Override
public void close() {
cancellableThreads.cancel("closed");
}
@Override
public String toString() {
return "shard_gateway";
}
}

View File

@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.gateway;
import org.elasticsearch.index.shard.IndexShardException;
import org.elasticsearch.index.shard.ShardId;
/**
*
*/
public class IndexShardGatewayException extends IndexShardException {
public IndexShardGatewayException(ShardId shardId, String msg) {
super(shardId, msg);
}
public IndexShardGatewayException(ShardId shardId, String msg, Throwable cause) {
super(shardId, msg, cause);
}
}

View File

@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.gateway;
import org.elasticsearch.index.shard.ShardId;
/**
*
*/
public class IndexShardGatewayRecoveryException extends IndexShardGatewayException {
public IndexShardGatewayRecoveryException(ShardId shardId, String msg) {
super(shardId, msg);
}
public IndexShardGatewayRecoveryException(ShardId shardId, String msg, Throwable cause) {
super(shardId, msg, cause);
}
}

View File

@ -1,174 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.gateway;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.*;
import org.elasticsearch.index.snapshots.IndexShardSnapshotAndRestoreService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.IOException;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
/**
*
*/
public class IndexShardGatewayService extends AbstractIndexShardComponent implements Closeable {
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final IndexShard indexShard;
private final IndexShardGateway shardGateway;
private final IndexShardSnapshotAndRestoreService snapshotService;
@Inject
public IndexShardGatewayService(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
IndexShard indexShard, IndexShardGateway shardGateway, IndexShardSnapshotAndRestoreService snapshotService, ClusterService clusterService) {
super(shardId, indexSettings);
this.threadPool = threadPool;
this.indexShard = indexShard;
this.shardGateway = shardGateway;
this.snapshotService = snapshotService;
this.clusterService = clusterService;
}
/**
* Should be called when the shard routing state has changed (note, after the state has been set on the shard).
*/
public void routingStateChanged() {
}
public static interface RecoveryListener {
void onRecoveryDone();
void onIgnoreRecovery(String reason);
void onRecoveryFailed(IndexShardGatewayRecoveryException e);
}
/**
* Recovers the state of the shard from the gateway.
*/
public void recover(final boolean indexShouldExists, final RecoveryListener listener) throws IndexShardGatewayRecoveryException, IgnoreGatewayRecoveryException {
if (indexShard.state() == IndexShardState.CLOSED) {
// got closed on us, just ignore this recovery
listener.onIgnoreRecovery("shard closed");
return;
}
if (!indexShard.routingEntry().primary()) {
listener.onRecoveryFailed(new IndexShardGatewayRecoveryException(shardId, "Trying to recover when the shard is in backup state", null));
return;
}
try {
if (indexShard.routingEntry().restoreSource() != null) {
indexShard.recovering("from snapshot", RecoveryState.Type.SNAPSHOT, indexShard.routingEntry().restoreSource());
} else {
indexShard.recovering("from gateway", RecoveryState.Type.GATEWAY, clusterService.localNode());
}
} catch (IllegalIndexShardStateException e) {
// that's fine, since we might be called concurrently, just ignore this, we are already recovering
listener.onIgnoreRecovery("already in recovering process, " + e.getMessage());
return;
}
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
try {
final RecoveryState recoveryState = indexShard.recoveryState();
if (indexShard.routingEntry().restoreSource() != null) {
logger.debug("restoring from {} ...", indexShard.routingEntry().restoreSource());
snapshotService.restore(recoveryState);
} else {
logger.debug("starting recovery from {} ...", shardGateway);
shardGateway.recover(indexShouldExists, recoveryState);
}
// Check that the gateway didn't leave the shard in init or recovering stage. It is up to the gateway
// to call post recovery.
IndexShardState shardState = indexShard.state();
assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder();
sb.append("recovery completed from ").append(shardGateway).append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n");
RecoveryState.Index index = recoveryState.getIndex();
sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [")
.append(new ByteSizeValue(index.totalBytes())).append("], took[")
.append(TimeValue.timeValueMillis(index.time())).append("]\n");
sb.append(" : recovered_files [").append(index.recoveredFileCount()).append("] with total_size [")
.append(new ByteSizeValue(index.recoveredBytes())).append("]\n");
sb.append(" : reusing_files [").append(index.reusedFileCount()).append("] with total_size [")
.append(new ByteSizeValue(index.reusedBytes())).append("]\n");
sb.append(" verify_index : took [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [")
.append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
.append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
logger.trace(sb.toString());
} else if (logger.isDebugEnabled()) {
logger.debug("recovery completed from [{}], took [{}]", shardGateway, timeValueMillis(recoveryState.getTimer().time()));
}
listener.onRecoveryDone();
} catch (IndexShardGatewayRecoveryException e) {
if (indexShard.state() == IndexShardState.CLOSED) {
// got closed on us, just ignore this recovery
listener.onIgnoreRecovery("shard closed");
return;
}
if ((e.getCause() instanceof IndexShardClosedException) || (e.getCause() instanceof IndexShardNotStartedException)) {
// got closed on us, just ignore this recovery
listener.onIgnoreRecovery("shard closed");
return;
}
listener.onRecoveryFailed(e);
} catch (IndexShardClosedException e) {
listener.onIgnoreRecovery("shard closed");
} catch (IndexShardNotStartedException e) {
listener.onIgnoreRecovery("shard closed");
} catch (Exception e) {
if (indexShard.state() == IndexShardState.CLOSED) {
// got closed on us, just ignore this recovery
listener.onIgnoreRecovery("shard closed");
return;
}
listener.onRecoveryFailed(new IndexShardGatewayRecoveryException(shardId, "failed recovery", e));
}
}
});
}
@Override
public synchronized void close() throws IOException {
shardGateway.close();
}
}

View File

@ -1,32 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.gateway;
import org.elasticsearch.index.shard.ShardId;
/**
*
*/
public class IndexShardGatewaySnapshotNotAllowedException extends IndexShardGatewayException {
public IndexShardGatewaySnapshotNotAllowedException(ShardId shardId, String msg) {
super(shardId, msg);
}
}

View File

@ -17,9 +17,8 @@
* under the License.
*/
package org.elasticsearch.index.indexing.slowlog;
package org.elasticsearch.index.indexing;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@ -27,10 +26,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.Locale;
@ -38,7 +34,7 @@ import java.util.concurrent.TimeUnit;
/**
*/
public class ShardSlowLogIndexingService extends AbstractIndexShardComponent {
public final class IndexingSlowLog {
private boolean reformat;
@ -52,53 +48,16 @@ public class ShardSlowLogIndexingService extends AbstractIndexShardComponent {
private final ESLogger indexLogger;
private final ESLogger deleteLogger;
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN = "index.indexing.slowlog.threshold.index.warn";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO = "index.indexing.slowlog.threshold.index.info";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG = "index.indexing.slowlog.threshold.index.debug";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE = "index.indexing.slowlog.threshold.index.trace";
public static final String INDEX_INDEXING_SLOWLOG_REFORMAT = "index.indexing.slowlog.reformat";
public static final String INDEX_INDEXING_SLOWLOG_LEVEL = "index.indexing.slowlog.level";
class ApplySettings implements IndexSettingsService.Listener {
@Override
public synchronized void onRefreshSettings(Settings settings) {
long indexWarnThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexWarnThreshold)).nanos();
if (indexWarnThreshold != ShardSlowLogIndexingService.this.indexWarnThreshold) {
ShardSlowLogIndexingService.this.indexWarnThreshold = indexWarnThreshold;
}
long indexInfoThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexInfoThreshold)).nanos();
if (indexInfoThreshold != ShardSlowLogIndexingService.this.indexInfoThreshold) {
ShardSlowLogIndexingService.this.indexInfoThreshold = indexInfoThreshold;
}
long indexDebugThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexDebugThreshold)).nanos();
if (indexDebugThreshold != ShardSlowLogIndexingService.this.indexDebugThreshold) {
ShardSlowLogIndexingService.this.indexDebugThreshold = indexDebugThreshold;
}
long indexTraceThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, TimeValue.timeValueNanos(ShardSlowLogIndexingService.this.indexTraceThreshold)).nanos();
if (indexTraceThreshold != ShardSlowLogIndexingService.this.indexTraceThreshold) {
ShardSlowLogIndexingService.this.indexTraceThreshold = indexTraceThreshold;
}
String level = settings.get(INDEX_INDEXING_SLOWLOG_LEVEL, ShardSlowLogIndexingService.this.level);
if (!level.equals(ShardSlowLogIndexingService.this.level)) {
ShardSlowLogIndexingService.this.indexLogger.setLevel(level.toUpperCase(Locale.ROOT));
ShardSlowLogIndexingService.this.deleteLogger.setLevel(level.toUpperCase(Locale.ROOT));
ShardSlowLogIndexingService.this.level = level;
}
boolean reformat = settings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, ShardSlowLogIndexingService.this.reformat);
if (reformat != ShardSlowLogIndexingService.this.reformat) {
ShardSlowLogIndexingService.this.reformat = reformat;
}
}
}
@Inject
public ShardSlowLogIndexingService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService) {
super(shardId, indexSettings);
private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug";
public static final String INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE = INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace";
public static final String INDEX_INDEXING_SLOWLOG_REFORMAT = INDEX_INDEXING_SLOWLOG_PREFIX +".reformat";
public static final String INDEX_INDEXING_SLOWLOG_LEVEL = INDEX_INDEXING_SLOWLOG_PREFIX +".level";
IndexingSlowLog(Settings indexSettings) {
this.reformat = indexSettings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, true);
this.indexWarnThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(-1)).nanos();
this.indexInfoThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(-1)).nanos();
this.indexDebugThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(-1)).nanos();
@ -106,20 +65,49 @@ public class ShardSlowLogIndexingService extends AbstractIndexShardComponent {
this.level = indexSettings.get(INDEX_INDEXING_SLOWLOG_LEVEL, "TRACE").toUpperCase(Locale.ROOT);
this.indexLogger = Loggers.getLogger(logger, ".index");
this.deleteLogger = Loggers.getLogger(logger, ".delete");
this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX +".index");
this.deleteLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX +".delete");
indexLogger.setLevel(level);
deleteLogger.setLevel(level);
indexSettingsService.addListener(new ApplySettings());
}
public void postIndex(Engine.Index index, long tookInNanos) {
synchronized void onRefreshSettings(Settings settings) {
long indexWarnThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(this.indexWarnThreshold)).nanos();
if (indexWarnThreshold != this.indexWarnThreshold) {
this.indexWarnThreshold = indexWarnThreshold;
}
long indexInfoThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(this.indexInfoThreshold)).nanos();
if (indexInfoThreshold != this.indexInfoThreshold) {
this.indexInfoThreshold = indexInfoThreshold;
}
long indexDebugThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(this.indexDebugThreshold)).nanos();
if (indexDebugThreshold != this.indexDebugThreshold) {
this.indexDebugThreshold = indexDebugThreshold;
}
long indexTraceThreshold = settings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, TimeValue.timeValueNanos(this.indexTraceThreshold)).nanos();
if (indexTraceThreshold != this.indexTraceThreshold) {
this.indexTraceThreshold = indexTraceThreshold;
}
String level = settings.get(INDEX_INDEXING_SLOWLOG_LEVEL, this.level);
if (!level.equals(this.level)) {
this.indexLogger.setLevel(level.toUpperCase(Locale.ROOT));
this.deleteLogger.setLevel(level.toUpperCase(Locale.ROOT));
this.level = level;
}
boolean reformat = settings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, this.reformat);
if (reformat != this.reformat) {
this.reformat = reformat;
}
}
void postIndex(Engine.Index index, long tookInNanos) {
postIndexing(index.parsedDoc(), tookInNanos);
}
public void postCreate(Engine.Create create, long tookInNanos) {
void postCreate(Engine.Create create, long tookInNanos) {
postIndexing(create.parsedDoc(), tookInNanos);
}
@ -135,12 +123,12 @@ public class ShardSlowLogIndexingService extends AbstractIndexShardComponent {
}
}
public static class SlowLogParsedDocumentPrinter {
final static class SlowLogParsedDocumentPrinter {
private final ParsedDocument doc;
private final long tookInNanos;
private final boolean reformat;
public SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat) {
SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat) {
this.doc = doc;
this.tookInNanos = tookInNanos;
this.reformat = reformat;

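The thresholds above are plain nanosecond longs, and the timeValueNanos(-1) defaults mean "disabled". A minimal plain-Java sketch (hypothetical names; the log-emitting code is not part of this hunk) of how thresholds of this shape are typically consulted, from most to least severe:

class SlowLogThresholdsSketch {
    // -1 disables a level, mirroring the timeValueNanos(-1) defaults above
    long warnNanos = -1, infoNanos = -1, debugNanos = -1, traceNanos = -1;

    String levelFor(long tookInNanos) {
        if (warnNanos >= 0 && tookInNanos > warnNanos) return "WARN";
        if (infoNanos >= 0 && tookInNanos > infoNanos) return "INFO";
        if (debugNanos >= 0 && tookInNanos > debugNanos) return "DEBUG";
        if (traceNanos >= 0 && tookInNanos > traceNanos) return "TRACE";
        return null; // fast enough: nothing to log
    }

    public static void main(String[] args) {
        SlowLogThresholdsSketch t = new SlowLogThresholdsSketch();
        t.warnNanos = 10_000_000L;  // 10ms
        t.traceNanos = 1_000_000L;  // 1ms
        System.out.println(t.levelFor(5_000_000L));   // TRACE
        System.out.println(t.levelFor(50_000_000L));  // WARN
    }
}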
View File

@ -21,15 +21,12 @@ package org.elasticsearch.index.indexing;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
@ -42,7 +39,7 @@ import java.util.concurrent.TimeUnit;
*/
public class ShardIndexingService extends AbstractIndexShardComponent {
private final ShardSlowLogIndexingService slowLog;
private final IndexingSlowLog slowLog;
private final StatsHolder totalStats = new StatsHolder();
@ -50,10 +47,9 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
private volatile Map<String, StatsHolder> typesStats = ImmutableMap.of();
@Inject
public ShardIndexingService(ShardId shardId, @IndexSettings Settings indexSettings, ShardSlowLogIndexingService slowLog) {
public ShardIndexingService(ShardId shardId, Settings indexSettings) {
super(shardId, indexSettings);
this.slowLog = slowLog;
this.slowLog = new IndexingSlowLog(indexSettings);
}
/**
@ -252,6 +248,10 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
return stats;
}
public void onRefreshSettings(Settings settings) {
slowLog.onRefreshSettings(settings);
}
static class StatsHolder {
public final MeanMetric indexMetric = new MeanMetric();
public final MeanMetric deleteMetric = new MeanMetric();

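The injection seam for the slow log is gone: the service now builds its own IndexingSlowLog from the index settings and forwards dynamic setting refreshes to it. A simplified sketch of that ownership pattern (hypothetical, plain-Java types):

import java.util.Map;

class OwnedSlowLogSketch {
    static class SlowLog {
        void onRefreshSettings(Map<String, String> settings) {
            // re-read thresholds, level and the reformat flag from the new settings
        }
    }

    static class IndexingService {
        private final SlowLog slowLog;

        IndexingService(Map<String, String> indexSettings) {
            this.slowLog = new SlowLog(); // constructed internally, no longer injected
        }

        void onRefreshSettings(Map<String, String> settings) {
            slowLog.onRefreshSettings(settings); // the owner forwards dynamic updates
        }
    }
}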
View File

@ -22,32 +22,38 @@ package org.elasticsearch.index.mapper;
import com.google.common.base.Function;
import com.google.common.collect.Collections2;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.FieldNameAnalyzer;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
/**
*
*/
public final class DocumentFieldMappers implements Iterable<FieldMapper> {
private final FieldMappersLookup fieldMappers;
/** Full field name to mapper */
private final CopyOnWriteHashMap<String, FieldMapper> fieldMappers;
private final FieldNameAnalyzer indexAnalyzer;
private final FieldNameAnalyzer searchAnalyzer;
private final FieldNameAnalyzer searchQuoteAnalyzer;
public DocumentFieldMappers(AnalysisService analysisService) {
this(new FieldMappersLookup(), new FieldNameAnalyzer(analysisService.defaultIndexAnalyzer()),
new FieldNameAnalyzer(analysisService.defaultSearchAnalyzer()),
new FieldNameAnalyzer(analysisService.defaultSearchQuoteAnalyzer()));
this(new CopyOnWriteHashMap<String, FieldMapper>(),
new FieldNameAnalyzer(analysisService.defaultIndexAnalyzer()),
new FieldNameAnalyzer(analysisService.defaultSearchAnalyzer()),
new FieldNameAnalyzer(analysisService.defaultSearchQuoteAnalyzer()));
}
private DocumentFieldMappers(FieldMappersLookup fieldMappers, FieldNameAnalyzer indexAnalyzer, FieldNameAnalyzer searchAnalyzer, FieldNameAnalyzer searchQuoteAnalyzer) {
private DocumentFieldMappers(CopyOnWriteHashMap<String, FieldMapper> fieldMappers, FieldNameAnalyzer indexAnalyzer, FieldNameAnalyzer searchAnalyzer, FieldNameAnalyzer searchQuoteAnalyzer) {
this.fieldMappers = fieldMappers;
this.indexAnalyzer = indexAnalyzer;
this.searchAnalyzer = searchAnalyzer;
@ -55,7 +61,10 @@ public final class DocumentFieldMappers implements Iterable<FieldMapper> {
}
public DocumentFieldMappers copyAndAllAll(Collection<FieldMapper> newMappers) {
FieldMappersLookup fieldMappers = this.fieldMappers.copyAndAddAll(newMappers);
CopyOnWriteHashMap<String, FieldMapper> map = this.fieldMappers;
for (FieldMapper fieldMapper : newMappers) {
map = map.copyAndPut(fieldMapper.fieldType().names().fullName(), fieldMapper);
}
FieldNameAnalyzer indexAnalyzer = this.indexAnalyzer.copyAndAddAll(Collections2.transform(newMappers, new Function<FieldMapper, Map.Entry<String, Analyzer>>() {
@Override
public Map.Entry<String, Analyzer> apply(FieldMapper input) {
@ -74,22 +83,7 @@ public final class DocumentFieldMappers implements Iterable<FieldMapper> {
return Maps.immutableEntry(input.fieldType().names().indexName(), (Analyzer)input.fieldType().searchQuoteAnalyzer());
}
}));
return new DocumentFieldMappers(fieldMappers, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer);
}
/**
* Looks up a field by its index name.
*
* Overriding index name for a field is no longer possible, and is only supported for backcompat.
* This function first attempts to lookup the field by full name, and only when that fails,
* does a full scan of all field mappers, collecting those with this index name.
*
* This will be removed in 3.0, once backcompat for overriding index name is removed.
* @deprecated Use {@link #getMapper(String)}
*/
@Deprecated
public FieldMappers indexName(String indexName) {
return fieldMappers.indexName(indexName);
return new DocumentFieldMappers(map, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer);
}
/** Returns the mapper for the given field */
@ -97,23 +91,29 @@ public final class DocumentFieldMappers implements Iterable<FieldMapper> {
return fieldMappers.get(field);
}
Collection<String> simpleMatchToIndexNames(String pattern) {
return fieldMappers.simpleMatchToIndexNames(pattern);
}
public Collection<String> simpleMatchToFullName(String pattern) {
return fieldMappers.simpleMatchToFullName(pattern);
}
/**
* Tries to find a match first by fullName, then by indexName.
*/
FieldMappers smartName(String name) {
return fieldMappers.smartName(name);
Set<String> fields = Sets.newHashSet();
for (FieldMapper fieldMapper : this) {
if (Regex.simpleMatch(pattern, fieldMapper.fieldType().names().fullName())) {
fields.add(fieldMapper.fieldType().names().fullName());
} else if (Regex.simpleMatch(pattern, fieldMapper.fieldType().names().indexName())) {
fields.add(fieldMapper.fieldType().names().fullName());
}
}
return fields;
}
public FieldMapper smartNameFieldMapper(String name) {
return fieldMappers.smartNameFieldMapper(name);
FieldMapper fieldMapper = getMapper(name);
if (fieldMapper != null) {
return fieldMapper;
}
for (FieldMapper otherFieldMapper : this) {
if (otherFieldMapper.fieldType().names().indexName().equals(name)) {
return otherFieldMapper;
}
}
return null;
}
/**
@ -145,6 +145,6 @@ public final class DocumentFieldMappers implements Iterable<FieldMapper> {
}
public Iterator<FieldMapper> iterator() {
return fieldMappers.iterator();
return fieldMappers.values().iterator();
}
}
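copyAndAllAll never mutates the current map: each entry goes through CopyOnWriteHashMap.copyAndPut, which returns a new map, and the result is published as a fresh DocumentFieldMappers. The same persistent-map update pattern, sketched with plain java.util maps standing in for the internal CopyOnWriteHashMap:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class CopyAndPutSketch {
    /** Returns a new snapshot with the entry added; the input map is untouched. */
    static Map<String, String> copyAndPut(Map<String, String> current, String key, String value) {
        Map<String, String> copy = new HashMap<>(current);
        copy.put(key, value);
        return Collections.unmodifiableMap(copy);
    }

    public static void main(String[] args) {
        Map<String, String> v1 = Collections.emptyMap();
        Map<String, String> v2 = copyAndPut(v1, "user.name", "string");
        System.out.println(v1.containsKey("user.name")); // false: old readers keep a stable view
        System.out.println(v2.containsKey("user.name")); // true
    }
}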

View File

@ -23,7 +23,6 @@ import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
@ -32,7 +31,6 @@ import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent;
@ -43,6 +41,7 @@ import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.Mapping.SourceTransform;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
@ -96,28 +95,32 @@ public class DocumentMapper implements ToXContent {
private final Mapper.BuilderContext builderContext;
public Builder(String index, Settings indexSettings, RootObjectMapper.Builder builder) {
public Builder(String index, Settings indexSettings, RootObjectMapper.Builder builder, MapperService mapperService) {
this.index = index;
this.indexSettings = indexSettings;
this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1));
this.rootObjectMapper = builder.build(builderContext);
// TODO: find a cleaner way to handle existing root mappings and using their field type as the default.
// the vast majority of these root mappers only need the existing type for backwards compatibility, since
// the pre 2.0 field type settings could be modified
// UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination)
this.rootMappers.put(UidFieldMapper.class, new UidFieldMapper(indexSettings));
this.rootMappers.put(IdFieldMapper.class, new IdFieldMapper(indexSettings));
this.rootMappers.put(RoutingFieldMapper.class, new RoutingFieldMapper(indexSettings));
this.rootMappers.put(UidFieldMapper.class, new UidFieldMapper(indexSettings, mapperService.fullName(UidFieldMapper.NAME)));
this.rootMappers.put(IdFieldMapper.class, new IdFieldMapper(indexSettings, mapperService.fullName(IdFieldMapper.NAME)));
this.rootMappers.put(RoutingFieldMapper.class, new RoutingFieldMapper(indexSettings, mapperService.fullName(RoutingFieldMapper.NAME)));
// add default mappers, order is important (for example analyzer should come before the rest to set context.analyzer)
this.rootMappers.put(SizeFieldMapper.class, new SizeFieldMapper(indexSettings));
this.rootMappers.put(IndexFieldMapper.class, new IndexFieldMapper(indexSettings));
this.rootMappers.put(SizeFieldMapper.class, new SizeFieldMapper(indexSettings, mapperService.fullName(SizeFieldMapper.NAME)));
this.rootMappers.put(IndexFieldMapper.class, new IndexFieldMapper(indexSettings, mapperService.fullName(IndexFieldMapper.NAME)));
this.rootMappers.put(SourceFieldMapper.class, new SourceFieldMapper(indexSettings));
this.rootMappers.put(TypeFieldMapper.class, new TypeFieldMapper(indexSettings));
this.rootMappers.put(AllFieldMapper.class, new AllFieldMapper(indexSettings));
this.rootMappers.put(TimestampFieldMapper.class, new TimestampFieldMapper(indexSettings));
this.rootMappers.put(TypeFieldMapper.class, new TypeFieldMapper(indexSettings, mapperService.fullName(TypeFieldMapper.NAME)));
this.rootMappers.put(AllFieldMapper.class, new AllFieldMapper(indexSettings, mapperService.fullName(AllFieldMapper.NAME)));
this.rootMappers.put(TimestampFieldMapper.class, new TimestampFieldMapper(indexSettings, mapperService.fullName(TimestampFieldMapper.NAME)));
this.rootMappers.put(TTLFieldMapper.class, new TTLFieldMapper(indexSettings));
this.rootMappers.put(VersionFieldMapper.class, new VersionFieldMapper(indexSettings));
this.rootMappers.put(ParentFieldMapper.class, new ParentFieldMapper(indexSettings));
this.rootMappers.put(ParentFieldMapper.class, new ParentFieldMapper(indexSettings, mapperService.fullName(ParentFieldMapper.NAME)));
// _field_names last so that it can see all other fields
this.rootMappers.put(FieldNamesFieldMapper.class, new FieldNamesFieldMapper(indexSettings));
this.rootMappers.put(FieldNamesFieldMapper.class, new FieldNamesFieldMapper(indexSettings, mapperService.fullName(FieldNamesFieldMapper.NAME)));
}
public Builder meta(ImmutableMap<String, Object> meta) {
@ -393,87 +396,40 @@ public class DocumentMapper implements ToXContent {
return DocumentParser.transformSourceAsMap(mapping, sourceAsMap);
}
private void addFieldMappers(Collection<FieldMapper> fieldMappers) {
assert mappingLock.isWriteLockedByCurrentThread();
this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers);
mapperService.addFieldMappers(fieldMappers);
}
public boolean isParent(String type) {
return mapperService.getParentTypes().contains(type);
}
private void addObjectMappers(Collection<ObjectMapper> objectMappers) {
private void addMappers(Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
assert mappingLock.isWriteLockedByCurrentThread();
MapBuilder<String, ObjectMapper> builder = MapBuilder.newMapBuilder(this.objectMappers);
for (ObjectMapper objectMapper : objectMappers) {
builder.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) {
hasNestedObjects = true;
}
// first ensure we don't have any incompatible new fields
mapperService.checkNewMappersCompatibility(objectMappers, fieldMappers, true);
// update mappers for this document type
MapBuilder<String, ObjectMapper> builder = MapBuilder.newMapBuilder(this.objectMappers);
for (ObjectMapper objectMapper : objectMappers) {
builder.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) {
hasNestedObjects = true;
}
this.objectMappers = builder.immutableMap();
mapperService.addObjectMappers(objectMappers);
}
private MergeResult newMergeContext(boolean simulate) {
return new MergeResult(simulate) {
final List<String> conflicts = new ArrayList<>();
final List<FieldMapper> newFieldMappers = new ArrayList<>();
final List<ObjectMapper> newObjectMappers = new ArrayList<>();
@Override
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
assert simulate() == false;
newFieldMappers.addAll(fieldMappers);
}
@Override
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
assert simulate() == false;
newObjectMappers.addAll(objectMappers);
}
@Override
public Collection<FieldMapper> getNewFieldMappers() {
return newFieldMappers;
}
@Override
public Collection<ObjectMapper> getNewObjectMappers() {
return newObjectMappers;
}
@Override
public void addConflict(String mergeFailure) {
conflicts.add(mergeFailure);
}
@Override
public boolean hasConflicts() {
return conflicts.isEmpty() == false;
}
@Override
public String[] buildConflicts() {
return conflicts.toArray(Strings.EMPTY_ARRAY);
}
};
}
public MergeResult merge(Mapping mapping, boolean simulate) {
try (ReleasableLock lock = mappingWriteLock.acquire()) {
final MergeResult mergeResult = newMergeContext(simulate);
this.mapping.merge(mapping, mergeResult);
if (simulate == false) {
addFieldMappers(mergeResult.getNewFieldMappers());
addObjectMappers(mergeResult.getNewObjectMappers());
refreshSource();
}
return mergeResult;
this.objectMappers = builder.immutableMap();
this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers);
// finally update for the entire index
mapperService.addMappers(objectMappers, fieldMappers);
}
public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
try (ReleasableLock lock = mappingWriteLock.acquire()) {
final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes);
this.mapping.merge(mapping, mergeResult);
if (simulate == false) {
addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers());
refreshSource();
}
return mergeResult;
}
}
private void refreshSource() throws ElasticsearchGenerationException {
@ -486,10 +442,6 @@ public class DocumentMapper implements ToXContent {
public void close() {
documentParser.close();
mapping.root.close();
for (RootMapper rootMapper : mapping.rootMappers) {
rootMapper.close();
}
}
@Override

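With the anonymous MergeResult subclass removed, merge(mapping, simulate, updateAllTypes) constructs a plain MergeResult and callers follow a simulate-then-apply protocol. A simplified sketch of that protocol (hypothetical types; the actual diffing is elided):

import java.util.ArrayList;
import java.util.List;

class MergeProtocolSketch {
    static class MergeResult {
        final boolean simulate;
        final List<String> conflicts = new ArrayList<>();
        MergeResult(boolean simulate) { this.simulate = simulate; }
        boolean hasConflicts() { return conflicts.isEmpty() == false; }
    }

    MergeResult merge(String newMapping, boolean simulate) {
        MergeResult result = new MergeResult(simulate);
        // ... compare newMapping against the current mapping, collecting conflicts ...
        if (simulate == false && result.hasConflicts() == false) {
            // ... apply the collected object/field mappers and refresh the source ...
        }
        return result;
    }

    void update(String newMapping) {
        if (merge(newMapping, true).hasConflicts()) { // dry run first
            throw new IllegalArgumentException("mapping merge conflicts");
        }
        merge(newMapping, false);                     // then apply for real
    }
}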
View File

@ -34,6 +34,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
@ -167,7 +168,7 @@ public class DocumentMapperParser extends AbstractIndexComponent {
}
public Mapper.TypeParser.ParserContext parserContext() {
return new Mapper.TypeParser.ParserContext(analysisService, similarityLookupService, typeParsers, indexVersionCreated);
return new Mapper.TypeParser.ParserContext(analysisService, similarityLookupService, mapperService, typeParsers, indexVersionCreated);
}
public DocumentMapper parse(String source) throws MapperParsingException {
@ -227,7 +228,7 @@ public class DocumentMapperParser extends AbstractIndexComponent {
Mapper.TypeParser.ParserContext parserContext = parserContext();
// parse RootObjectMapper
DocumentMapper.Builder docBuilder = doc(index.name(), indexSettings, (RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext));
DocumentMapper.Builder docBuilder = doc(index.name(), indexSettings, (RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService);
Iterator<Map.Entry<String, Object>> iterator = mapping.entrySet().iterator();
// parse DocumentMapper
while(iterator.hasNext()) {
@ -304,8 +305,8 @@ public class DocumentMapperParser extends AbstractIndexComponent {
private Tuple<String, Map<String, Object>> extractMapping(String type, String source) throws MapperParsingException {
Map<String, Object> root;
try {
root = XContentFactory.xContent(source).createParser(source).mapOrderedAndClose();
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
root = parser.mapOrdered();
} catch (Exception e) {
throw new MapperParsingException("failed to parse mapping definition", e);
}

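The parsing change swaps mapOrderedAndClose() for an explicit try-with-resources block, so the parser is closed even when mapOrdered() throws. The shape of that guarantee, sketched with a plain Closeable:

import java.io.Closeable;
import java.io.IOException;

class TryWithResourcesSketch {
    static String parse(Closeable parser) throws IOException {
        try (Closeable p = parser) { // close() runs on success and on failure alike
            return "parsed";         // stand-in for parser.mapOrdered()
        }
    }
}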
View File

@ -33,6 +33,13 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.core.AbstractFieldMapper.Builder;
import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType;
import org.elasticsearch.index.mapper.core.LongFieldMapper.LongFieldType;
import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.object.ArrayValueMapperParser;
@ -438,6 +445,174 @@ class DocumentParser implements Closeable {
}
}
private static Mapper.Builder<?,?> createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) {
Mapper.Builder builder = null;
if (fieldType instanceof StringFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
builder = MapperBuilders.stringField(currentFieldName);
}
} else if (fieldType instanceof DateFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
if (builder == null) {
builder = MapperBuilders.dateField(currentFieldName);
}
} else if (fieldType.numericType() != null) {
switch (fieldType.numericType()) {
case LONG:
builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
break;
case DOUBLE:
builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
break;
case INT:
builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
if (builder == null) {
builder = MapperBuilders.integerField(currentFieldName);
}
break;
case FLOAT:
builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
if (builder == null) {
builder = MapperBuilders.floatField(currentFieldName);
}
break;
default:
throw new AssertionError("Unexpected numeric type " + fieldType.numericType());
}
}
return builder;
}
private static Mapper.Builder<?,?> createBuilderFromDynamicValue(final ParseContext context, XContentParser.Token token, String currentFieldName) throws IOException {
if (token == XContentParser.Token.VALUE_STRING) {
// do a quick test to see if it fits a dynamic template; if so, use it.
// we need to do it here so we can handle things like attachment templates, where calling
// text (to see if it's a date) causes the binary value to be cleared
{
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null);
if (builder != null) {
return builder;
}
}
if (context.root().dateDetection()) {
String text = context.parser().text();
// a safe check since "1" gets parsed as well
if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) {
for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) {
try {
dateTimeFormatter.parser().parseMillis(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
if (builder == null) {
builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter);
}
return builder;
} catch (Exception e) {
// failure to parse this, continue
}
}
}
}
if (context.root().numericDetection()) {
String text = context.parser().text();
try {
Long.parseLong(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
return builder;
} catch (NumberFormatException e) {
// not a long number
}
try {
Double.parseDouble(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
return builder;
} catch (NumberFormatException e) {
// not a double
}
}
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
builder = MapperBuilders.stringField(currentFieldName);
}
return builder;
} else if (token == XContentParser.Token.VALUE_NUMBER) {
XContentParser.NumberType numberType = context.parser().numberType();
if (numberType == XContentParser.NumberType.INT) {
if (context.parser().estimatedNumberType()) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
return builder;
} else {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
if (builder == null) {
builder = MapperBuilders.integerField(currentFieldName);
}
return builder;
}
} else if (numberType == XContentParser.NumberType.LONG) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
return builder;
} else if (numberType == XContentParser.NumberType.FLOAT) {
if (context.parser().estimatedNumberType()) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
return builder;
} else {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
if (builder == null) {
builder = MapperBuilders.floatField(currentFieldName);
}
return builder;
}
} else if (numberType == XContentParser.NumberType.DOUBLE) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
return builder;
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean");
if (builder == null) {
builder = MapperBuilders.booleanField(currentFieldName);
}
return builder;
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary");
if (builder == null) {
builder = MapperBuilders.binaryField(currentFieldName);
}
return builder;
} else {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null);
if (builder != null) {
return builder;
}
}
// TODO how do we identify dynamically that it's a binary value?
throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]");
}
private static ObjectMapper parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
ObjectMapper.Dynamic dynamic = parentMapper.dynamic();
if (dynamic == null) {
@ -449,140 +624,38 @@ class DocumentParser implements Closeable {
if (dynamic == ObjectMapper.Dynamic.FALSE) {
return null;
}
Mapper mapper = null;
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
if (token == XContentParser.Token.VALUE_STRING) {
boolean resolved = false;
// do a quick test to see if it fits a dynamic template; if so, use it.
// we need to do it here so we can handle things like attachment templates, where calling
// text (to see if it's a date) causes the binary value to be cleared
if (!resolved) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null);
if (builder != null) {
mapper = builder.build(builderContext);
resolved = true;
}
}
if (!resolved && context.root().dateDetection()) {
String text = context.parser().text();
// a safe check since "1" gets parsed as well
if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) {
for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) {
try {
dateTimeFormatter.parser().parseMillis(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date");
if (builder == null) {
builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter);
}
mapper = builder.build(builderContext);
resolved = true;
break;
} catch (Exception e) {
// failure to parse this, continue
}
}
}
}
if (!resolved && context.root().numericDetection()) {
String text = context.parser().text();
try {
Long.parseLong(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
mapper = builder.build(builderContext);
resolved = true;
} catch (Exception e) {
// not a long number
}
if (!resolved) {
try {
Double.parseDouble(text);
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
mapper = builder.build(builderContext);
resolved = true;
} catch (Exception e) {
// not a double
}
}
}
if (!resolved) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
builder = MapperBuilders.stringField(currentFieldName);
}
mapper = builder.build(builderContext);
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
XContentParser.NumberType numberType = context.parser().numberType();
if (numberType == XContentParser.NumberType.INT) {
if (context.parser().estimatedNumberType()) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
mapper = builder.build(builderContext);
} else {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
if (builder == null) {
builder = MapperBuilders.integerField(currentFieldName);
}
mapper = builder.build(builderContext);
}
} else if (numberType == XContentParser.NumberType.LONG) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
if (builder == null) {
builder = MapperBuilders.longField(currentFieldName);
}
mapper = builder.build(builderContext);
} else if (numberType == XContentParser.NumberType.FLOAT) {
if (context.parser().estimatedNumberType()) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
mapper = builder.build(builderContext);
} else {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
if (builder == null) {
builder = MapperBuilders.floatField(currentFieldName);
}
mapper = builder.build(builderContext);
}
} else if (numberType == XContentParser.NumberType.DOUBLE) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
if (builder == null) {
builder = MapperBuilders.doubleField(currentFieldName);
}
mapper = builder.build(builderContext);
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean");
if (builder == null) {
builder = MapperBuilders.booleanField(currentFieldName);
}
mapper = builder.build(builderContext);
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary");
if (builder == null) {
builder = MapperBuilders.binaryField(currentFieldName);
}
mapper = builder.build(builderContext);
} else {
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null);
final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName));
Mapper.Builder builder = null;
if (existingFieldType != null) {
// create a builder of the same type
builder = createBuilderFromFieldType(context, existingFieldType, currentFieldName);
if (builder != null) {
mapper = builder.build(builderContext);
} else {
// TODO how do we identify dynamically that it's a binary value?
throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]");
// best-effort to not introduce a conflict
if (builder instanceof StringFieldMapper.Builder) {
StringFieldMapper.Builder stringBuilder = (StringFieldMapper.Builder) builder;
stringBuilder.store(existingFieldType.stored());
stringBuilder.indexOptions(existingFieldType.indexOptions());
stringBuilder.tokenized(existingFieldType.tokenized());
stringBuilder.omitNorms(existingFieldType.omitNorms());
stringBuilder.docValues(existingFieldType.hasDocValues());
stringBuilder.indexAnalyzer(existingFieldType.indexAnalyzer());
stringBuilder.searchAnalyzer(existingFieldType.searchAnalyzer());
} else if (builder instanceof NumberFieldMapper.Builder) {
NumberFieldMapper.Builder<?,?> numberBuilder = (NumberFieldMapper.Builder<?, ?>) builder;
numberBuilder.store(existingFieldType.stored());
numberBuilder.indexOptions(existingFieldType.indexOptions());
numberBuilder.tokenized(existingFieldType.tokenized());
numberBuilder.omitNorms(existingFieldType.omitNorms());
numberBuilder.docValues(existingFieldType.hasDocValues());
numberBuilder.precisionStep(existingFieldType.numericPrecisionStep());
}
}
}
if (builder == null) {
builder = createBuilderFromDynamicValue(context, token, currentFieldName);
}
Mapper mapper = builder.build(builderContext);
mapper = parseAndMergeUpdate(mapper, context);
@ -621,10 +694,9 @@ class DocumentParser implements Closeable {
/** Creates a copy of the current field with the given field name and boost */
private static void parseCopy(String field, ParseContext context) throws IOException {
// TODO: this should not be indexName...
FieldMappers mappers = context.docMapper().mappers().indexName(field);
if (mappers != null && !mappers.isEmpty()) {
mappers.mapper().parse(context);
FieldMapper fieldMapper = context.docMapper().mappers().getMapper(field);
if (fieldMapper != null) {
fieldMapper.parse(context);
} else {
// The path of the dest field might be completely different from the current one so we need to reset it
context = context.overridePath(new ContentPath(0));
@ -679,7 +751,10 @@ class DocumentParser implements Closeable {
}
private static XContentParser transform(Mapping mapping, XContentParser parser) throws IOException {
Map<String, Object> transformed = transformSourceAsMap(mapping, parser.mapOrderedAndClose());
Map<String, Object> transformed;
try (XContentParser _ = parser) {
transformed = transformSourceAsMap(mapping, parser.mapOrdered());
}
XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()).value(transformed);
return parser.contentType().xContent().createParser(builder.bytes());
}
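For string values, createBuilderFromDynamicValue above tries a dynamic template first, then date detection, then numeric detection, and finally falls back to a plain string field. A condensed plain-Java sketch of that ordering (the formatter-based date parsing is elided; looksLikeDate mirrors only the quick separator guard):

class DynamicDetectionSketch {
    static String detectType(String text, boolean dateDetection, boolean numericDetection) {
        // 1. a matching dynamic template would win here (elided)
        if (dateDetection && looksLikeDate(text)) {
            return "date"; // the real code additionally requires a date formatter to parse it
        }
        if (numericDetection) {
            try { Long.parseLong(text); return "long"; } catch (NumberFormatException e) { /* not a long */ }
            try { Double.parseDouble(text); return "double"; } catch (NumberFormatException e) { /* not a double */ }
        }
        return "string"; // fallback
    }

    // quick guard: "1" parses as a date too, so require repeated separators
    static boolean looksLikeDate(String text) {
        return count(text, ':') > 1 || count(text, '-') > 1 || count(text, '/') > 1;
    }

    static int count(String s, char c) {
        int n = 0;
        for (int i = 0; i < s.length(); i++) {
            if (s.charAt(i) == c) n++;
        }
        return n;
    }
}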

View File

@ -32,6 +32,15 @@ public interface FieldMapper extends Mapper {
MappedFieldType fieldType();
/** Returns a reference to the MappedFieldType for this mapper. */
MappedFieldTypeReference fieldTypeReference();
/**
* Updates the reference to this field's MappedFieldType.
* Implementations should assert equality of the underlying field type
*/
void setFieldTypeReference(MappedFieldTypeReference ref);
/**
* List of fields where this field should be copied to
*/

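setFieldTypeReference exists so that several mappers, one per type, can share a single MappedFieldTypeReference; an update through any of them is then visible to all. The idea in miniature, with a String standing in for the frozen MappedFieldType:

class SharedRefSketch {
    static class Ref {
        private String fieldType;
        Ref(String fieldType) { this.fieldType = fieldType; }
        String get() { return fieldType; }
        void set(String fieldType) { this.fieldType = fieldType; }
    }

    static class Mapper {
        Ref ref;
        Mapper(Ref ref) { this.ref = ref; }
    }

    public static void main(String[] args) {
        Ref shared = new Ref("string");
        Mapper inTypeA = new Mapper(shared);
        Mapper inTypeB = new Mapper(shared);
        shared.set("string, boost=2.0");       // e.g. an update with update_all_types
        System.out.println(inTypeA.ref.get()); // string, boost=2.0
        System.out.println(inTypeB.ref.get()); // string, boost=2.0
    }
}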
View File

@ -1,193 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import com.google.common.collect.Sets;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
import org.elasticsearch.common.regex.Regex;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
/**
* A class that holds a map of field mappers from name, index name, and full name.
*/
class FieldMappersLookup implements Iterable<FieldMapper> {
/** Full field name to mappers */
private final CopyOnWriteHashMap<String, FieldMappers> mappers;
/** Create a new empty instance. */
public FieldMappersLookup() {
mappers = new CopyOnWriteHashMap<>();
}
private FieldMappersLookup(CopyOnWriteHashMap<String, FieldMappers> map) {
mappers = map;
}
/**
* Return a new instance that contains the union of this instance and the provided mappers.
*/
public FieldMappersLookup copyAndAddAll(Collection<FieldMapper> newMappers) {
CopyOnWriteHashMap<String, FieldMappers> map = this.mappers;
for (FieldMapper mapper : newMappers) {
String key = mapper.fieldType().names().fullName();
FieldMappers mappers = map.get(key);
if (mappers == null) {
mappers = new FieldMappers(mapper);
} else {
mappers = mappers.concat(mapper);
}
map = map.copyAndPut(key, mappers);
}
return new FieldMappersLookup(map);
}
/**
* Returns the field mappers based on the mapper index name.
* NOTE: this only exists for backcompat support and if the index name
* does not match its field name, this is a linear time operation
* @deprecated Use {@link #get(String)}
*/
@Deprecated
public FieldMappers indexName(String indexName) {
FieldMappers fieldMappers = fullName(indexName);
if (fieldMappers != null) {
if (fieldMappers.mapper().fieldType().names().indexName().equals(indexName)) {
return fieldMappers;
}
}
fieldMappers = new FieldMappers();
for (FieldMapper mapper : this) {
if (mapper.fieldType().names().indexName().equals(indexName)) {
fieldMappers = fieldMappers.concat(mapper);
}
}
if (fieldMappers.isEmpty()) {
return null;
}
return fieldMappers;
}
/**
* Returns the field mappers based on the mapper full name.
*/
public FieldMappers fullName(String fullName) {
return mappers.get(fullName);
}
/** Returns the mapper for the given field */
public FieldMapper get(String field) {
FieldMappers fieldMappers = mappers.get(field);
if (fieldMappers == null) {
return null;
}
if (fieldMappers.mappers().size() != 1) {
throw new IllegalStateException("Mapper for field [" + field + "] should be unique");
}
return fieldMappers.mapper();
}
/**
* Returns a list of the index names matching a simple-match, regex-like pattern against both full name and index name.
*/
public Collection<String> simpleMatchToIndexNames(String pattern) {
Set<String> fields = Sets.newHashSet();
for (FieldMapper fieldMapper : this) {
if (Regex.simpleMatch(pattern, fieldMapper.fieldType().names().fullName())) {
fields.add(fieldMapper.fieldType().names().indexName());
} else if (Regex.simpleMatch(pattern, fieldMapper.fieldType().names().indexName())) {
fields.add(fieldMapper.fieldType().names().indexName());
}
}
return fields;
}
/**
* Returns a list of the full names matching a simple-match, regex-like pattern against both full name and index name.
*/
public Collection<String> simpleMatchToFullName(String pattern) {
Set<String> fields = Sets.newHashSet();
for (FieldMapper fieldMapper : this) {
if (Regex.simpleMatch(pattern, fieldMapper.fieldType().names().fullName())) {
fields.add(fieldMapper.fieldType().names().fullName());
} else if (Regex.simpleMatch(pattern, fieldMapper.fieldType().names().indexName())) {
fields.add(fieldMapper.fieldType().names().fullName());
}
}
return fields;
}
/**
* Tries to find a match first by {@link #fullName(String)}, then by {@link #indexName(String)}.
*/
@Nullable
FieldMappers smartName(String name) {
FieldMappers fieldMappers = fullName(name);
if (fieldMappers != null) {
return fieldMappers;
}
return indexName(name);
}
/**
* Tries to find a match first by {@link #fullName(String)}, then by {@link #indexName(String)},
* and returns the first mapper for it (see {@link org.elasticsearch.index.mapper.FieldMappers#mapper()}).
*/
@Nullable
public FieldMapper smartNameFieldMapper(String name) {
FieldMappers fieldMappers = smartName(name);
if (fieldMappers == null) {
return null;
}
return fieldMappers.mapper();
}
public Iterator<FieldMapper> iterator() {
final Iterator<FieldMappers> fieldsItr = mappers.values().iterator();
if (fieldsItr.hasNext() == false) {
return Collections.emptyIterator();
}
return new Iterator<FieldMapper>() {
Iterator<FieldMapper> fieldValuesItr = fieldsItr.next().iterator();
@Override
public boolean hasNext() {
return fieldsItr.hasNext() || fieldValuesItr.hasNext();
}
@Override
public FieldMapper next() {
if (fieldValuesItr.hasNext() == false && fieldsItr.hasNext()) {
fieldValuesItr = fieldsItr.next().iterator();
}
return fieldValuesItr.next();
}
@Override
public void remove() {
throw new UnsupportedOperationException("cannot remove field mapper from lookup");
}
};
}
}

View File

@ -0,0 +1,184 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import com.google.common.collect.Sets;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
import org.elasticsearch.common.regex.Regex;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
/**
* An immutable container for looking up {@link MappedFieldType}s by their name.
*/
class FieldTypeLookup implements Iterable<MappedFieldType> {
private static final Function<MappedFieldTypeReference, MappedFieldType> UNWRAPPER = new Function<MappedFieldTypeReference, MappedFieldType>() {
@Override
public MappedFieldType apply(MappedFieldTypeReference ref) {
return ref.get();
}
};
/** Full field name to field type */
private final CopyOnWriteHashMap<String, MappedFieldTypeReference> fullNameToFieldType;
/** Index field name to field type */
private final CopyOnWriteHashMap<String, MappedFieldTypeReference> indexNameToFieldType;
/** Create a new empty instance. */
public FieldTypeLookup() {
fullNameToFieldType = new CopyOnWriteHashMap<>();
indexNameToFieldType = new CopyOnWriteHashMap<>();
}
private FieldTypeLookup(CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName, CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName) {
fullNameToFieldType = fullName;
indexNameToFieldType = indexName;
}
/**
* Return a new instance that contains the union of this instance and the field types
* from the provided fields. If a field already exists, the field type will be updated
* to use the new mapper's field type.
*/
public FieldTypeLookup copyAndAddAll(Collection<FieldMapper> newFieldMappers) {
CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName = this.fullNameToFieldType;
CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName = this.indexNameToFieldType;
for (FieldMapper fieldMapper : newFieldMappers) {
MappedFieldType fieldType = fieldMapper.fieldType();
MappedFieldTypeReference fullNameRef = fullName.get(fieldType.names().fullName());
MappedFieldTypeReference indexNameRef = indexName.get(fieldType.names().indexName());
if (fullNameRef == null && indexNameRef == null) {
// new field, just use the ref from this field mapper
fullName = fullName.copyAndPut(fieldType.names().fullName(), fieldMapper.fieldTypeReference());
indexName = indexName.copyAndPut(fieldType.names().indexName(), fieldMapper.fieldTypeReference());
} else if (fullNameRef == null) {
// this index name already exists, so copy over the reference
fullName = fullName.copyAndPut(fieldType.names().fullName(), indexNameRef);
indexNameRef.set(fieldMapper.fieldType()); // field type is updated, since modifiable settings may have changed
fieldMapper.setFieldTypeReference(indexNameRef);
} else if (indexNameRef == null) {
// this full name already exists, so copy over the reference
indexName = indexName.copyAndPut(fieldType.names().indexName(), fullNameRef);
fullNameRef.set(fieldMapper.fieldType()); // field type is updated, since modifiable settings may have changed
fieldMapper.setFieldTypeReference(fullNameRef);
} else if (fullNameRef == indexNameRef) {
// the field already exists, so replace the reference in this mapper with the pre-existing one
fullNameRef.set(fieldMapper.fieldType()); // field type is updated, since modifiable settings may have changed
fieldMapper.setFieldTypeReference(fullNameRef);
} else {
// this new field bridges between two existing field names (a full and index name), which we cannot support
throw new IllegalStateException("insane mappings found. field " + fieldType.names().fullName() + " maps across types to field " + fieldType.names().indexName());
}
}
return new FieldTypeLookup(fullName, indexName);
}
/**
* Checks if the given mappers' field types are compatible with existing field types.
* If any are not compatible, an IllegalArgumentException is thrown.
* If updateAllTypes is true, only basic compatibility is checked.
*/
public void checkCompatibility(Collection<FieldMapper> newFieldMappers, boolean updateAllTypes) {
for (FieldMapper fieldMapper : newFieldMappers) {
MappedFieldTypeReference ref = fullNameToFieldType.get(fieldMapper.fieldType().names().fullName());
if (ref != null) {
List<String> conflicts = new ArrayList<>();
ref.get().checkTypeName(fieldMapper.fieldType(), conflicts);
if (conflicts.isEmpty()) { // only check compat if they are the same type
boolean strict = ref.getNumAssociatedMappers() > 1 && updateAllTypes == false;
ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
}
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with existing mapping in other types" + conflicts.toString());
}
}
// field type for the index name must be compatible too
MappedFieldTypeReference indexNameRef = indexNameToFieldType.get(fieldMapper.fieldType().names().indexName());
if (indexNameRef != null) {
List<String> conflicts = new ArrayList<>();
indexNameRef.get().checkTypeName(fieldMapper.fieldType(), conflicts);
if (conflicts.isEmpty()) { // only check compat if they are the same type
boolean strict = indexNameRef.getNumAssociatedMappers() > 1 && updateAllTypes == false;
indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
}
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with mapping with the same index name in other types" + conflicts.toString());
}
}
}
}
/** Returns the field type for the given field name */
public MappedFieldType get(String field) {
MappedFieldTypeReference ref = fullNameToFieldType.get(field);
if (ref == null) return null;
return ref.get();
}
/** Returns the field type for the given index name */
public MappedFieldType getByIndexName(String field) {
MappedFieldTypeReference ref = indexNameToFieldType.get(field);
if (ref == null) return null;
return ref.get();
}
/**
* Returns a list of the index names matching a simple-match, regex-like pattern against both full name and index name.
*/
public Collection<String> simpleMatchToIndexNames(String pattern) {
Set<String> fields = Sets.newHashSet();
for (MappedFieldType fieldType : this) {
if (Regex.simpleMatch(pattern, fieldType.names().fullName())) {
fields.add(fieldType.names().indexName());
} else if (Regex.simpleMatch(pattern, fieldType.names().indexName())) {
fields.add(fieldType.names().indexName());
}
}
return fields;
}
/**
* Returns a list of the full names matching a simple-match, regex-like pattern against both full name and index name.
*/
public Collection<String> simpleMatchToFullName(String pattern) {
Set<String> fields = Sets.newHashSet();
for (MappedFieldType fieldType : this) {
if (Regex.simpleMatch(pattern, fieldType.names().fullName())) {
fields.add(fieldType.names().fullName());
} else if (Regex.simpleMatch(pattern, fieldType.names().indexName())) {
fields.add(fieldType.names().fullName());
}
}
return fields;
}
public Iterator<MappedFieldType> iterator() {
return Iterators.transform(fullNameToFieldType.values().iterator(), UNWRAPPER);
}
}
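copyAndAddAll above distinguishes four cases per field: brand new, index name already mapped, full name already mapped, or both names resolving to the same existing reference. The branch logic, sketched mutably in plain Java (a String[] plays the shared reference; the real class is persistent rather than updated in place):

import java.util.HashMap;
import java.util.Map;

class AddFieldSketch {
    final Map<String, String[]> byFullName = new HashMap<>();
    final Map<String, String[]> byIndexName = new HashMap<>();

    void add(String fullName, String indexName, String type) {
        String[] fullRef = byFullName.get(fullName);
        String[] indexRef = byIndexName.get(indexName);
        if (fullRef == null && indexRef == null) {  // brand new field
            String[] ref = { type };
            byFullName.put(fullName, ref);
            byIndexName.put(indexName, ref);
        } else if (fullRef == null) {               // index name seen before: adopt its reference
            indexRef[0] = type;
            byFullName.put(fullName, indexRef);
        } else if (indexRef == null) {              // full name seen before: adopt its reference
            fullRef[0] = type;
            byIndexName.put(indexName, fullRef);
        } else if (fullRef == indexRef) {           // same field again: update the shared reference
            fullRef[0] = type;
        } else {                                    // would bridge two distinct fields
            throw new IllegalStateException("field " + fullName + " maps across types to field " + indexName);
        }
    }
}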

View File

@ -20,7 +20,6 @@
package org.elasticsearch.index.mapper;
import com.google.common.base.Strings;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
@ -38,7 +37,6 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldDataType;
@ -52,7 +50,7 @@ import java.util.Objects;
/**
* This defines the core properties and functions to operate on a field.
*/
public class MappedFieldType extends FieldType {
public abstract class MappedFieldType extends FieldType {
public static class Names {
@ -196,12 +194,17 @@ public class MappedFieldType extends FieldType {
this.nullValueAsString = ref.nullValueAsString();
}
public MappedFieldType() {}
public MappedFieldType clone() {
return new MappedFieldType(this);
public MappedFieldType() {
setTokenized(true);
setStored(false);
setStoreTermVectors(false);
setOmitNorms(false);
setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
setBoost(1.0f);
}
public abstract MappedFieldType clone();
@Override
public boolean equals(Object o) {
if (!super.equals(o)) return false;
@ -226,10 +229,24 @@ public class MappedFieldType extends FieldType {
// norelease: we need to override freeze() and add safety checks that all settings are actually set
/** Returns the name of this type, as would be specified in mapping properties */
public abstract String typeName();
/** Checks this type is the same type as other. Adds a conflict if they are different. */
public final void checkTypeName(MappedFieldType other, List<String> conflicts) {
if (typeName().equals(other.typeName()) == false) {
conflicts.add("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
} else if (getClass() != other.getClass()) {
throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName());
}
}
/**
* Checks for any conflicts between this field type and other.
* If strict is true, all properties must be equal.
* Otherwise, only properties which must never change in an index are checked.
*/
public void checkCompatibility(MappedFieldType other, List<String> conflicts) {
public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
boolean indexed = indexOptions() != IndexOptions.NONE;
boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE;
// TODO: should be validating if index options go "up" (but "down" is ok)
@ -240,7 +257,7 @@ public class MappedFieldType extends FieldType {
conflicts.add("mapper [" + names().fullName() + "] has different store values");
}
if (hasDocValues() == false && other.hasDocValues()) {
// don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitely set
// don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitly set
// when the doc_values field data format is configured
conflicts.add("mapper [" + names().fullName() + "] has different doc_values values");
}
@ -277,10 +294,30 @@ public class MappedFieldType extends FieldType {
if (!names().equals(other.names())) {
conflicts.add("mapper [" + names().fullName() + "] has different index_name");
}
if (Objects.equals(similarity(), other.similarity()) == false) {
conflicts.add("mapper [" + names().fullName() + "] has different similarity");
}
if (strict) {
if (omitNorms() != other.omitNorms()) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [omit_norms] across all types.");
}
if (boost() != other.boost()) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types.");
}
if (normsLoading() != other.normsLoading()) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [norms].loading across all types.");
}
if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types.");
}
if (Objects.equals(fieldDataType(), other.fieldDataType()) == false) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [fielddata] across all types.");
}
if (Objects.equals(nullValue(), other.nullValue()) == false) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [null_value] across all types.");
}
}
}
public boolean isNumeric() {

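The strict flag passed into checkCompatibility is what ties the update_all_types option to shared fields: a field referenced by more than one type is checked strictly unless the caller opts in. A small sketch of the derivation and one strict-only check, with boost as the example property:

import java.util.ArrayList;
import java.util.List;

class StrictCompatSketch {
    static List<String> check(String field, int numAssociatedMappers, boolean updateAllTypes,
                              float existingBoost, float newBoost) {
        List<String> conflicts = new ArrayList<>();
        boolean strict = numAssociatedMappers > 1 && updateAllTypes == false;
        if (strict && existingBoost != newBoost) {
            conflicts.add("mapper [" + field + "] is used by multiple types. "
                    + "Set update_all_types to true to update [boost] across all types.");
        }
        return conflicts;
    }
}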
View File

@ -18,10 +18,33 @@
*/
package org.elasticsearch.index.mapper;
public class MappedFieldTypeTests extends FieldTypeTestCase {
/**
* A container for a {@link MappedFieldType} which can be updated and is reference counted.
*/
public class MappedFieldTypeReference {
private MappedFieldType fieldType; // the current field type this reference points to
private int numAssociatedMappers;
@Override
public MappedFieldType createDefaultFieldType() {
return new MappedFieldType();
public MappedFieldTypeReference(MappedFieldType fieldType) {
fieldType.freeze(); // ensure frozen
this.fieldType = fieldType;
this.numAssociatedMappers = 1;
}
public MappedFieldType get() {
return fieldType;
}
public void set(MappedFieldType fieldType) {
fieldType.freeze(); // ensure frozen
this.fieldType = fieldType;
}
public int getNumAssociatedMappers() {
return numAssociatedMappers;
}
public void incrementAssociatedMappers() {
++numAssociatedMappers;
}
}

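// Sketch of the reference-counting contract above: every mapper that shares the field
// name holds the same MappedFieldTypeReference, so swapping the frozen type through
// set() updates all of them at once. Variable names here are hypothetical.
MappedFieldTypeReference ref = new MappedFieldTypeReference(fieldType); // numAssociatedMappers == 1
ref.incrementAssociatedMappers(); // a second mapper for the same field name attaches
MappedFieldType updated = ref.get().clone();
updated.setHasDocValues(true);
ref.set(updated); // set() freezes the clone; both mappers now observe it
assert ref.getNumAssociatedMappers() == 2;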
View File

@ -86,14 +86,18 @@ public interface Mapper extends ToXContent, Iterable<Mapper> {
private final SimilarityLookupService similarityLookupService;
private final MapperService mapperService;
private final ImmutableMap<String, TypeParser> typeParsers;
private final Version indexVersionCreated;
public ParserContext(AnalysisService analysisService, SimilarityLookupService similarityLookupService,
MapperService mapperService,
ImmutableMap<String, TypeParser> typeParsers, Version indexVersionCreated) {
this.analysisService = analysisService;
this.similarityLookupService = similarityLookupService;
this.mapperService = mapperService;
this.typeParsers = typeParsers;
this.indexVersionCreated = indexVersionCreated;
}
@ -106,6 +110,10 @@ public interface Mapper extends ToXContent, Iterable<Mapper> {
return similarityLookupService;
}
public MapperService mapperService() {
return mapperService;
}
public TypeParser typeParser(String type) {
return typeParsers.get(Strings.toUnderscoreCase(type));
}
@ -121,6 +129,4 @@ public interface Mapper extends ToXContent, Iterable<Mapper> {
String name();
void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException;
void close();
}

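// The new mapperService() accessor lets a TypeParser consult mappings from other types
// while parsing; a minimal hypothetical sketch (the field name is illustrative only):
MappedFieldType existingType = parserContext.mapperService().fullName("title");
if (existingType != null) {
// align the field being parsed with the type already registered elsewhere
}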
View File

@ -1,50 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.elasticsearch.index.analysis.FieldNameAnalyzer;
/** Hacky analyzer to dispatch per-thread based on the type of the current document being indexed, to look up the per-field Analyzer. Once
* mappings are moved to the index level we can remove this. */
public class MapperAnalyzer extends DelegatingAnalyzerWrapper {
private final MapperService mapperService;
/** Which type this thread is currently indexing. */
private final ThreadLocal<String> threadTypes = new ThreadLocal<>();
public MapperAnalyzer(MapperService mapperService) {
super(Analyzer.PER_FIELD_REUSE_STRATEGY);
this.mapperService = mapperService;
}
/** Any thread that is about to use this analyzer for indexing must first set the type by calling this. */
public void setType(String type) {
threadTypes.set(type);
}
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
// First get the FieldNameAnalyzer from the type, then ask it for the right analyzer for this field, or the default index analyzer:
return ((FieldNameAnalyzer) mapperService.documentMapper(threadTypes.get()).mappers().indexAnalyzer()).getWrappedAnalyzer(fieldName);
}
}

View File

@ -28,69 +28,12 @@ import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
/**
*
*/
public final class MapperBuilders {
private MapperBuilders() {
}
private MapperBuilders() {}
public static DocumentMapper.Builder doc(String index, Settings settings, RootObjectMapper.Builder objectBuilder) {
return new DocumentMapper.Builder(index, settings, objectBuilder);
}
public static SourceFieldMapper.Builder source() {
return new SourceFieldMapper.Builder();
}
public static IdFieldMapper.Builder id() {
return new IdFieldMapper.Builder();
}
public static RoutingFieldMapper.Builder routing() {
return new RoutingFieldMapper.Builder();
}
public static UidFieldMapper.Builder uid() {
return new UidFieldMapper.Builder();
}
public static SizeFieldMapper.Builder size() {
return new SizeFieldMapper.Builder();
}
public static VersionFieldMapper.Builder version() {
return new VersionFieldMapper.Builder();
}
public static TypeFieldMapper.Builder type() {
return new TypeFieldMapper.Builder();
}
public static FieldNamesFieldMapper.Builder fieldNames() {
return new FieldNamesFieldMapper.Builder();
}
public static IndexFieldMapper.Builder index() {
return new IndexFieldMapper.Builder();
}
public static TimestampFieldMapper.Builder timestamp() {
return new TimestampFieldMapper.Builder();
}
public static TTLFieldMapper.Builder ttl() {
return new TTLFieldMapper.Builder();
}
public static ParentFieldMapper.Builder parent() {
return new ParentFieldMapper.Builder();
}
public static AllFieldMapper.Builder all() {
return new AllFieldMapper.Builder();
public static DocumentMapper.Builder doc(String index, Settings settings, RootObjectMapper.Builder objectBuilder, MapperService mapperService) {
return new DocumentMapper.Builder(index, settings, objectBuilder, mapperService);
}
public static RootObjectMapper.Builder rootObject(String name) {

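// Illustrative call against the narrowed MapperBuilders surface; the index name and
// type name are hypothetical:
DocumentMapper.Builder docBuilder = MapperBuilders.doc("my_index", indexSettings,
MapperBuilders.rootObject("my_type"), mapperService);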
View File

@ -20,8 +20,12 @@
package org.elasticsearch.index.mapper;
import com.carrotsearch.hppc.ObjectHashSet;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.*;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterators;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
@ -36,7 +40,6 @@ import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@ -83,6 +86,23 @@ public class MapperService extends AbstractIndexComponent {
"_uid", "_id", "_type", "_all", "_parent", "_routing", "_index",
"_size", "_timestamp", "_ttl"
);
private static final Function<MappedFieldType, Analyzer> INDEX_ANALYZER_EXTRACTOR = new Function<MappedFieldType, Analyzer>() {
public Analyzer apply(MappedFieldType fieldType) {
return fieldType.indexAnalyzer();
}
};
private static final Function<MappedFieldType, Analyzer> SEARCH_ANALYZER_EXTRACTOR = new Function<MappedFieldType, Analyzer>() {
public Analyzer apply(MappedFieldType fieldType) {
return fieldType.searchAnalyzer();
}
};
private static final Function<MappedFieldType, Analyzer> SEARCH_QUOTE_ANALYZER_EXTRACTOR = new Function<MappedFieldType, Analyzer>() {
public Analyzer apply(MappedFieldType fieldType) {
return fieldType.searchQuoteAnalyzer();
}
};
private final AnalysisService analysisService;
private final IndexFieldDataService fieldDataService;
@ -102,14 +122,15 @@ public class MapperService extends AbstractIndexComponent {
final ReentrantReadWriteLock mappingLock = new ReentrantReadWriteLock();
private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock());
private volatile FieldMappersLookup fieldMappers;
private volatile ImmutableOpenMap<String, ObjectMappers> fullPathObjectMappers = ImmutableOpenMap.of();
private volatile FieldTypeLookup fieldTypes;
private volatile ImmutableOpenMap<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.of();
private boolean hasNested = false; // updated dynamically to true when a nested object is added
private final DocumentMapperParser documentParser;
private final SmartIndexNameSearchAnalyzer searchAnalyzer;
private final SmartIndexNameSearchQuoteAnalyzer searchQuoteAnalyzer;
private final MapperAnalyzerWrapper indexAnalyzer;
private final MapperAnalyzerWrapper searchAnalyzer;
private final MapperAnalyzerWrapper searchQuoteAnalyzer;
private final List<DocumentTypeListener> typeListeners = new CopyOnWriteArrayList<>();
@ -124,10 +145,11 @@ public class MapperService extends AbstractIndexComponent {
super(index, indexSettings);
this.analysisService = analysisService;
this.fieldDataService = fieldDataService;
this.fieldMappers = new FieldMappersLookup();
this.fieldTypes = new FieldTypeLookup();
this.documentParser = new DocumentMapperParser(index, indexSettings, this, analysisService, similarityLookupService, scriptService);
this.searchAnalyzer = new SmartIndexNameSearchAnalyzer(analysisService.defaultSearchAnalyzer());
this.searchQuoteAnalyzer = new SmartIndexNameSearchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer());
this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), INDEX_ANALYZER_EXTRACTOR);
this.searchAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), SEARCH_ANALYZER_EXTRACTOR);
this.searchQuoteAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultSearchQuoteAnalyzer(), SEARCH_QUOTE_ANALYZER_EXTRACTOR);
this.dynamic = indexSettings.getAsBoolean("index.mapper.dynamic", true);
defaultPercolatorMappingSource = "{\n" +
@ -214,7 +236,7 @@ public class MapperService extends AbstractIndexComponent {
typeListeners.remove(listener);
}
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault) {
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
if (DEFAULT_MAPPING.equals(type)) {
// verify we can parse it
DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
@ -230,13 +252,13 @@ public class MapperService extends AbstractIndexComponent {
}
return mapper;
} else {
return merge(parse(type, mappingSource, applyDefault));
return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
}
}
// never expose this to the outside world, we need to reparse the doc mapper so we get fresh
// instances of field mappers to properly remove existing doc mapper
private DocumentMapper merge(DocumentMapper mapper) {
private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) {
try (ReleasableLock lock = mappingWriteLock.acquire()) {
if (mapper.type().length() == 0) {
throw new InvalidTypeNameException("mapping type name is empty");
@ -262,7 +284,7 @@ public class MapperService extends AbstractIndexComponent {
DocumentMapper oldMapper = mappers.get(mapper.type());
if (oldMapper != null) {
MergeResult result = oldMapper.merge(mapper.mapping(), false);
MergeResult result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
if (result.hasConflicts()) {
// TODO: What should we do???
if (logger.isDebugEnabled()) {
@ -270,19 +292,18 @@ public class MapperService extends AbstractIndexComponent {
}
}
fieldDataService.onMappingUpdate();
assert assertSerialization(oldMapper);
return oldMapper;
} else {
List<ObjectMapper> newObjectMappers = new ArrayList<>();
List<FieldMapper> newFieldMappers = new ArrayList<>();
for (RootMapper rootMapper : mapper.mapping().rootMappers) {
if (rootMapper instanceof FieldMapper) {
newFieldMappers.add((FieldMapper)rootMapper);
newFieldMappers.add((FieldMapper) rootMapper);
}
}
MapperUtils.collect(mapper.mapping().root, newObjectMappers, newFieldMappers);
addFieldMappers(newFieldMappers);
addObjectMappers(newObjectMappers);
checkNewMappersCompatibility(newObjectMappers, newFieldMappers, updateAllTypes);
addMappers(newObjectMappers, newFieldMappers);
for (DocumentTypeListener typeListener : typeListeners) {
typeListener.beforeCreate(mapper);
@ -313,28 +334,33 @@ public class MapperService extends AbstractIndexComponent {
return true;
}
protected void addObjectMappers(Collection<ObjectMapper> objectMappers) {
protected void checkNewMappersCompatibility(Collection<ObjectMapper> newObjectMappers, Collection<FieldMapper> newFieldMappers, boolean updateAllTypes) {
assert mappingLock.isWriteLockedByCurrentThread();
ImmutableOpenMap.Builder<String, ObjectMappers> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
for (ObjectMapper objectMapper : objectMappers) {
ObjectMappers mappers = fullPathObjectMappers.get(objectMapper.fullPath());
if (mappers == null) {
mappers = new ObjectMappers(objectMapper);
} else {
mappers = mappers.concat(objectMapper);
for (ObjectMapper newObjectMapper : newObjectMappers) {
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
if (existingObjectMapper != null) {
MergeResult result = new MergeResult(true, updateAllTypes);
existingObjectMapper.merge(newObjectMapper, result);
if (result.hasConflicts()) {
throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" +
Arrays.toString(result.buildConflicts()));
}
}
fullPathObjectMappers.put(objectMapper.fullPath(), mappers);
// update the hasNested flag
}
fieldTypes.checkCompatibility(newFieldMappers, updateAllTypes);
}
protected void addMappers(Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
assert mappingLock.isWriteLockedByCurrentThread();
ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
for (ObjectMapper objectMapper : objectMappers) {
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) {
hasNested = true;
}
}
this.fullPathObjectMappers = fullPathObjectMappers.build();
}
protected void addFieldMappers(Collection<FieldMapper> fieldMappers) {
assert mappingLock.isWriteLockedByCurrentThread();
this.fieldMappers = this.fieldMappers.copyAndAddAll(fieldMappers);
this.fieldTypes = this.fieldTypes.copyAndAddAll(fieldMappers);
}
public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
@ -479,11 +505,7 @@ public class MapperService extends AbstractIndexComponent {
* If multiple types have fields with the same index name, the first is returned.
*/
public MappedFieldType indexName(String indexName) {
FieldMappers mappers = fieldMappers.indexName(indexName);
if (mappers == null) {
return null;
}
return mappers.mapper().fieldType();
return fieldTypes.getByIndexName(indexName);
}
/**
@ -492,11 +514,7 @@ public class MapperService extends AbstractIndexComponent {
* If multiple types have fields with the same full name, the first is returned.
*/
public MappedFieldType fullName(String fullName) {
FieldMappers mappers = fieldMappers.fullName(fullName);
if (mappers == null) {
return null;
}
return mappers.mapper().fieldType();
return fieldTypes.get(fullName);
}
/**
@ -504,52 +522,21 @@ public class MapperService extends AbstractIndexComponent {
* then the fields will be returned with a type prefix.
*/
public Collection<String> simpleMatchToIndexNames(String pattern) {
return simpleMatchToIndexNames(pattern, null);
}
/**
* Returns all the fields that match the given pattern, with an optional narrowing
* based on a list of types.
*/
public Collection<String> simpleMatchToIndexNames(String pattern, @Nullable String[] types) {
if (Regex.isSimpleMatchPattern(pattern) == false) {
// no wildcards
return ImmutableList.of(pattern);
}
if (MetaData.isAllTypes(types)) {
return fieldMappers.simpleMatchToIndexNames(pattern);
}
List<String> fields = Lists.newArrayList();
for (String type : types) {
DocumentMapper possibleDocMapper = mappers.get(type);
if (possibleDocMapper != null) {
for (String indexName : possibleDocMapper.mappers().simpleMatchToIndexNames(pattern)) {
fields.add(indexName);
}
}
}
return fields;
return fieldTypes.simpleMatchToIndexNames(pattern);
}
// TODO: remove this since the underlying index names are now the same across all types
public Collection<String> simpleMatchToIndexNames(String pattern, @Nullable String[] types) {
return simpleMatchToIndexNames(pattern);
}
// TODO: remove types param, since the object mapper must be the same across all types
public ObjectMapper getObjectMapper(String name, @Nullable String[] types) {
if (types == null || types.length == 0 || types.length == 1 && types[0].equals("_all")) {
ObjectMappers mappers = fullPathObjectMappers.get(name);
if (mappers != null) {
return mappers.mapper();
}
return null;
}
for (String type : types) {
DocumentMapper possibleDocMapper = mappers.get(type);
if (possibleDocMapper != null) {
ObjectMapper mapper = possibleDocMapper.objectMappers().get(name);
if (mapper != null) {
return mapper;
}
}
}
return null;
return fullPathObjectMappers.get(name);
}
public MappedFieldType smartNameFieldType(String smartName) {
@ -560,22 +547,9 @@ public class MapperService extends AbstractIndexComponent {
return indexName(smartName);
}
// TODO: remove this since the underlying index names are now the same across all types
public MappedFieldType smartNameFieldType(String smartName, @Nullable String[] types) {
if (types == null || types.length == 0 || types.length == 1 && types[0].equals("_all")) {
return smartNameFieldType(smartName);
}
for (String type : types) {
DocumentMapper documentMapper = mappers.get(type);
// we found a mapper
if (documentMapper != null) {
// see if we find a field for it
FieldMappers mappers = documentMapper.mappers().smartName(smartName);
if (mappers != null) {
return mappers.mapper().fieldType();
}
}
}
return null;
return smartNameFieldType(smartName);
}
/**
@ -604,6 +578,10 @@ public class MapperService extends AbstractIndexComponent {
return fieldType;
}
public Analyzer indexAnalyzer() {
return this.indexAnalyzer;
}
public Analyzer searchAnalyzer() {
return this.searchAnalyzer;
}
@ -622,18 +600,14 @@ public class MapperService extends AbstractIndexComponent {
} else {
do {
String objectPath = fieldName.substring(0, indexOf);
ObjectMappers objectMappers = fullPathObjectMappers.get(objectPath);
if (objectMappers == null) {
ObjectMapper objectMapper = fullPathObjectMappers.get(objectPath);
if (objectMapper == null) {
indexOf = objectPath.lastIndexOf('.');
continue;
}
if (objectMappers.hasNested()) {
for (ObjectMapper objectMapper : objectMappers) {
if (objectMapper.nested().isNested()) {
return objectMapper;
}
}
if (objectMapper.nested().isNested()) {
return objectMapper;
}
indexOf = objectPath.lastIndexOf('.');
@ -654,39 +628,26 @@ public class MapperService extends AbstractIndexComponent {
return META_FIELDS.contains(fieldName);
}
final class SmartIndexNameSearchAnalyzer extends DelegatingAnalyzerWrapper {
/** An analyzer wrapper that can look up fields within the index mappings */
final class MapperAnalyzerWrapper extends DelegatingAnalyzerWrapper {
private final Analyzer defaultAnalyzer;
private final Function<MappedFieldType, Analyzer> extractAnalyzer;
SmartIndexNameSearchAnalyzer(Analyzer defaultAnalyzer) {
MapperAnalyzerWrapper(Analyzer defaultAnalyzer, Function<MappedFieldType, Analyzer> extractAnalyzer) {
super(Analyzer.PER_FIELD_REUSE_STRATEGY);
this.defaultAnalyzer = defaultAnalyzer;
this.extractAnalyzer = extractAnalyzer;
}
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
MappedFieldType fieldType = smartNameFieldType(fieldName);
if (fieldType != null && fieldType.searchAnalyzer() != null) {
return fieldType.searchAnalyzer();
}
return defaultAnalyzer;
}
}
final class SmartIndexNameSearchQuoteAnalyzer extends DelegatingAnalyzerWrapper {
private final Analyzer defaultAnalyzer;
SmartIndexNameSearchQuoteAnalyzer(Analyzer defaultAnalyzer) {
super(Analyzer.PER_FIELD_REUSE_STRATEGY);
this.defaultAnalyzer = defaultAnalyzer;
}
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
MappedFieldType fieldType = smartNameFieldType(fieldName);
if (fieldType != null && fieldType.searchQuoteAnalyzer() != null) {
return fieldType.searchQuoteAnalyzer();
if (fieldType != null) {
Analyzer analyzer = extractAnalyzer.apply(fieldType);
if (analyzer != null) {
return analyzer;
}
}
return defaultAnalyzer;
}

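// End-to-end sketch of the new update_all_types plumbing through merge(); the type
// name and mapping source are hypothetical, and the CompressedXContent(String)
// construction is assumed here for brevity:
String mapping = "{\"my_type\":{\"properties\":{\"title\":{\"type\":\"string\",\"boost\":2.0}}}}";
// with updateAllTypes == false, a change to [boost] on a field that another type also
// maps is rejected by the strict checkCompatibility path; with updateAllTypes == true,
// the shared MappedFieldTypeReference is updated for all types at once.
mapperService.merge("my_type", new CompressedXContent(mapping), true, true);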
View File

@ -19,28 +19,16 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
import java.io.IOException;
import java.util.Collection;
public enum MapperUtils {
;
private static MergeResult newStrictMergeResult() {
return new MergeResult(false) {
@Override
public boolean hasConflicts() {
return false;
}
@Override
public String[] buildConflicts() {
return Strings.EMPTY_ARRAY;
}
return new MergeResult(false, false) {
@Override
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import java.util.ArrayList;
@ -26,29 +27,55 @@ import java.util.Collection;
import java.util.List;
/** A container for tracking results of a mapping merge. */
public abstract class MergeResult {
public class MergeResult {
private final boolean simulate;
private final boolean updateAllTypes;
public MergeResult(boolean simulate) {
private final List<String> conflicts = new ArrayList<>();
private final List<FieldMapper> newFieldMappers = new ArrayList<>();
private final List<ObjectMapper> newObjectMappers = new ArrayList<>();
public MergeResult(boolean simulate, boolean updateAllTypes) {
this.simulate = simulate;
this.updateAllTypes = updateAllTypes;
}
public abstract void addFieldMappers(Collection<FieldMapper> fieldMappers);
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
assert simulate() == false;
newFieldMappers.addAll(fieldMappers);
}
public abstract void addObjectMappers(Collection<ObjectMapper> objectMappers);
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
assert simulate() == false;
newObjectMappers.addAll(objectMappers);
}
public abstract Collection<FieldMapper> getNewFieldMappers();
public Collection<FieldMapper> getNewFieldMappers() {
return newFieldMappers;
}
public abstract Collection<ObjectMapper> getNewObjectMappers();
public Collection<ObjectMapper> getNewObjectMappers() {
return newObjectMappers;
}
public boolean simulate() {
return simulate;
}
public abstract void addConflict(String mergeFailure);
public boolean updateAllTypes() {
return updateAllTypes;
}
public abstract boolean hasConflicts();
public void addConflict(String mergeFailure) {
conflicts.add(mergeFailure);
}
public abstract String[] buildConflicts();
}
public boolean hasConflicts() {
return conflicts.isEmpty() == false;
}
public String[] buildConflicts() {
return conflicts.toArray(Strings.EMPTY_ARRAY);
}
}

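// Simulate-then-apply cycle with the now-concrete MergeResult; 'oldMapper' and
// 'newMapper' are hypothetical DocumentMapper instances, and the
// MergeMappingException(String[]) signature is assumed:
MergeResult simulation = oldMapper.merge(newMapper.mapping(), true, false); // simulate == true
if (simulation.hasConflicts()) {
throw new MergeMappingException(simulation.buildConflicts());
}
MergeResult applied = oldMapper.merge(newMapper.mapping(), false, false); // really apply
for (FieldMapper newFieldMapper : applied.getNewFieldMappers()) {
// mappers collected via addFieldMappers() during the real merge
}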
View File

@ -280,6 +280,11 @@ public abstract class ParseContext {
return in.analysisService();
}
@Override
public MapperService mapperService() {
return in.mapperService();
}
@Override
public String id() {
return in.id();
@ -513,6 +518,11 @@ public abstract class ParseContext {
return docMapperParser.analysisService;
}
@Override
public MapperService mapperService() {
return docMapperParser.mapperService;
}
@Override
public String id() {
return id;
@ -701,6 +711,8 @@ public abstract class ParseContext {
public abstract AnalysisService analysisService();
public abstract MapperService mapperService();
public abstract String id();
public abstract void ignoredValue(String indexName, String value);

View File

@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper.core;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.base.Function;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import org.apache.lucene.document.Field;
@ -39,6 +38,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappedFieldTypeReference;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
@ -63,18 +63,6 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.DOC_VALUES;
public abstract class AbstractFieldMapper implements FieldMapper {
public static class Defaults {
public static final MappedFieldType FIELD_TYPE = new MappedFieldType();
static {
FIELD_TYPE.setTokenized(true);
FIELD_TYPE.setStored(false);
FIELD_TYPE.setStoreTermVectors(false);
FIELD_TYPE.setOmitNorms(false);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
FIELD_TYPE.setBoost(Defaults.BOOST);
FIELD_TYPE.freeze();
}
public static final float BOOST = 1.0f;
public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
}
@ -133,7 +121,7 @@ public abstract class AbstractFieldMapper implements FieldMapper {
}
public T storeTermVectors(boolean termVectors) {
if (termVectors) {
if (termVectors != this.fieldType.storeTermVectors()) {
this.fieldType.setStoreTermVectors(termVectors);
} // don't set it to false, it is default and might be flipped by a more specific option
return builder;
@ -268,7 +256,7 @@ public abstract class AbstractFieldMapper implements FieldMapper {
}
}
protected MappedFieldType fieldType;
protected MappedFieldTypeReference fieldTypeRef;
protected final boolean hasDefaultDocValues;
protected Settings customFieldDataSettings;
protected final MultiFields multiFields;
@ -302,14 +290,16 @@ public abstract class AbstractFieldMapper implements FieldMapper {
}
hasDefaultDocValues = docValues == null;
this.fieldType = fieldType.clone();
this.fieldTypeRef = new MappedFieldTypeReference(fieldType); // must init first so defaultDocValues() can be called
fieldType = fieldType.clone();
if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) {
this.fieldType().setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
this.fieldType().setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
}
this.fieldType().setHasDocValues(docValues == null ? defaultDocValues() : docValues);
this.fieldType().setFieldDataType(fieldDataType);
this.fieldType().freeze();
fieldType.setHasDocValues(docValues == null ? defaultDocValues() : docValues);
fieldType.setFieldDataType(fieldDataType);
fieldType.freeze();
this.fieldTypeRef.set(fieldType); // now reset ref once extra settings have been initialized
this.multiFields = multiFields;
this.copyTo = copyTo;
@ -335,7 +325,21 @@ public abstract class AbstractFieldMapper implements FieldMapper {
@Override
public MappedFieldType fieldType() {
return fieldType;
return fieldTypeRef.get();
}
@Override
public MappedFieldTypeReference fieldTypeReference() {
return fieldTypeRef;
}
@Override
public void setFieldTypeReference(MappedFieldTypeReference ref) {
if (ref.get().equals(fieldType()) == false) {
throw new IllegalStateException("Cannot overwrite field type reference to unequal reference");
}
ref.incrementAssociatedMappers();
this.fieldTypeRef = ref;
}
@Override
@ -393,7 +397,16 @@ public abstract class AbstractFieldMapper implements FieldMapper {
}
AbstractFieldMapper fieldMergeWith = (AbstractFieldMapper) mergeWith;
List<String> subConflicts = new ArrayList<>(); // TODO: just expose list from MergeResult?
fieldType().checkCompatibility(fieldMergeWith.fieldType(), subConflicts);
fieldType().checkTypeName(fieldMergeWith.fieldType(), subConflicts);
if (subConflicts.isEmpty() == false) {
// return early if field types don't match
assert subConflicts.size() == 1;
mergeResult.addConflict(subConflicts.get(0));
return;
}
boolean strict = this.fieldTypeRef.getNumAssociatedMappers() > 1 && mergeResult.updateAllTypes() == false;
fieldType().checkCompatibility(fieldMergeWith.fieldType(), subConflicts, strict);
for (String conflict : subConflicts) {
mergeResult.addConflict(conflict);
}
@ -401,13 +414,10 @@ public abstract class AbstractFieldMapper implements FieldMapper {
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
// apply changeable values
this.fieldType = fieldMergeWith.fieldType().clone();
this.fieldType().freeze();
if (fieldMergeWith.customFieldDataSettings != null) {
if (!Objects.equal(fieldMergeWith.customFieldDataSettings, this.customFieldDataSettings)) {
this.customFieldDataSettings = fieldMergeWith.customFieldDataSettings;
}
}
MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
fieldType.freeze();
fieldTypeRef.set(fieldType);
this.customFieldDataSettings = fieldMergeWith.customFieldDataSettings;
this.copyTo = fieldMergeWith.copyTo;
}
}
@ -468,7 +478,7 @@ public abstract class AbstractFieldMapper implements FieldMapper {
}
TreeMap<String, Object> orderedFielddataSettings = new TreeMap<>();
if (customFieldDataSettings != null) {
if (hasCustomFieldDataSettings()) {
orderedFielddataSettings.putAll(customFieldDataSettings.getAsMap());
builder.field("fielddata", orderedFielddataSettings);
} else if (includeDefaults) {
@ -548,13 +558,12 @@ public abstract class AbstractFieldMapper implements FieldMapper {
}
}
protected abstract String contentType();
@Override
public void close() {
multiFields.close();
protected boolean hasCustomFieldDataSettings() {
return customFieldDataSettings != null && customFieldDataSettings.equals(Settings.EMPTY) == false;
}
protected abstract String contentType();
public static class MultiFields {
public static MultiFields empty() {
@ -688,12 +697,6 @@ public abstract class AbstractFieldMapper implements FieldMapper {
});
}
public void close() {
for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
cursor.value.close();
}
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (pathType != Defaults.PATH_TYPE) {
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));

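// Sketch of how two mappers for the same field name come to share one reference, and
// how that drives the 'strict' flag in merge() above (the wiring is hypothetical):
MappedFieldTypeReference shared = existingMapper.fieldTypeReference();
newMapper.setFieldTypeReference(shared); // rejects unequal field types, bumps numAssociatedMappers
boolean strict = shared.getNumAssociatedMappers() > 1 && updateAllTypes == false;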
View File

@ -109,9 +109,7 @@ public class BinaryFieldMapper extends AbstractFieldMapper {
static final class BinaryFieldType extends MappedFieldType {
private boolean tryUncompressing = false;
public BinaryFieldType() {
super(AbstractFieldMapper.Defaults.FIELD_TYPE);
}
public BinaryFieldType() {}
protected BinaryFieldType(BinaryFieldType ref) {
super(ref);
@ -135,6 +133,11 @@ public class BinaryFieldMapper extends AbstractFieldMapper {
return Objects.hash(super.hashCode(), tryUncompressing);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
public boolean tryUncompressing() {
return tryUncompressing;
}

View File

@ -118,9 +118,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper {
public static final class BooleanFieldType extends MappedFieldType {
public BooleanFieldType() {
super(AbstractFieldMapper.Defaults.FIELD_TYPE);
}
public BooleanFieldType() {}
protected BooleanFieldType(BooleanFieldType ref) {
super(ref);
@ -131,6 +129,11 @@ public class BooleanFieldMapper extends AbstractFieldMapper {
return new BooleanFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public Boolean nullValue() {
return (Boolean)super.nullValue();

View File

@ -121,7 +121,9 @@ public class ByteFieldMapper extends NumberFieldMapper {
}
static final class ByteFieldType extends NumberFieldType {
public ByteFieldType() {}
public ByteFieldType() {
super(NumericType.INT);
}
protected ByteFieldType(ByteFieldType ref) {
super(ref);
@ -132,6 +134,11 @@ public class ByteFieldMapper extends NumberFieldMapper {
return new ByteFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public Byte nullValue() {
return (Byte)super.nullValue();

View File

@ -226,9 +226,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper {
private AnalyzingCompletionLookupProvider analyzingSuggestLookupProvider;
private SortedMap<String, ContextMapping> contextMapping = ContextMapping.EMPTY_MAPPING;
public CompletionFieldType() {
super(AbstractFieldMapper.Defaults.FIELD_TYPE);
}
public CompletionFieldType() {}
protected CompletionFieldType(CompletionFieldType ref) {
super(ref);
@ -243,8 +241,13 @@ public class CompletionFieldMapper extends AbstractFieldMapper {
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
super.checkCompatibility(fieldType, conflicts);
public String typeName() {
return CONTENT_TYPE;
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
CompletionFieldType other = (CompletionFieldType)fieldType;
if (analyzingSuggestLookupProvider.hasPayloads() != other.analyzingSuggestLookupProvider.hasPayloads()) {
conflicts.add("mapper [" + names().fullName() + "] has different payload values");

View File

@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper.core;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
@ -221,7 +222,9 @@ public class DateFieldMapper extends NumberFieldMapper {
protected TimeUnit timeUnit = Defaults.TIME_UNIT;
protected DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter);
public DateFieldType() {}
public DateFieldType() {
super(NumericType.LONG);
}
protected DateFieldType(DateFieldType ref) {
super(ref);
@ -239,6 +242,7 @@ public class DateFieldMapper extends NumberFieldMapper {
if (!super.equals(o)) return false;
DateFieldType that = (DateFieldType) o;
return Objects.equals(dateTimeFormatter.format(), that.dateTimeFormatter.format()) &&
Objects.equals(dateTimeFormatter.locale(), that.dateTimeFormatter.locale()) &&
Objects.equals(timeUnit, that.timeUnit);
}
@ -247,6 +251,28 @@ public class DateFieldMapper extends NumberFieldMapper {
return Objects.hash(super.hashCode(), dateTimeFormatter.format(), timeUnit);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
if (strict) {
DateFieldType other = (DateFieldType)fieldType;
if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [format] across all types.");
}
if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [locale] across all types.");
}
if (Objects.equals(timeUnit(), other.timeUnit()) == false) {
conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [numeric_resolution] across all types.");
}
}
}
public FormatDateTimeFormatter dateTimeFormatter() {
return dateTimeFormatter;
}

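// Hypothetical illustration of the strict date checks above: two types map "created"
// with different formats, and strict mode turns that into a conflict entry.
List<String> conflicts = new ArrayList<>();
dateTypeA.checkCompatibility(dateTypeB, conflicts, true);
// conflicts now contains "mapper [created] is used by multiple types. Set
// update_all_types to true to update [format] across all types."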
View File

@ -20,9 +20,11 @@
package org.elasticsearch.index.mapper.core;
import com.carrotsearch.hppc.DoubleArrayList;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.NumericRangeQuery;
@ -124,9 +126,11 @@ public class DoubleFieldMapper extends NumberFieldMapper {
}
}
static final class DoubleFieldType extends NumberFieldType {
public static final class DoubleFieldType extends NumberFieldType {
public DoubleFieldType() {}
public DoubleFieldType() {
super(NumericType.DOUBLE);
}
protected DoubleFieldType(DoubleFieldType ref) {
super(ref);
@ -137,6 +141,11 @@ public class DoubleFieldMapper extends NumberFieldMapper {
return new DoubleFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public Double nullValue() {
return (Double)super.nullValue();

Some files were not shown because too many files have changed in this diff.