Merge branch 'master' into settings_prototype

Simon Willnauer 2015-12-16 20:29:21 +01:00
commit 71b204ea49
149 changed files with 4277 additions and 1464 deletions


@@ -97,6 +97,7 @@ subprojects {
// the "value" -quiet is added, separated by a space. This is ok since the javadoc
// command already adds -quiet, so we are just duplicating it
// see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
+javadoc.options.encoding='UTF8'
javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
}
}


@@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
@@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule {
registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
+registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);


@@ -0,0 +1,44 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
public static final String NAME = "indices:admin/synced_flush";
private SyncedFlushAction() {
super(NAME);
}
@Override
public SyncedFlushResponse newResponse() {
return new SyncedFlushResponse();
}
@Override
public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new SyncedFlushRequestBuilder(client, this);
}
}


@@ -0,0 +1,64 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import java.util.Arrays;
/**
* A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
* and writes the same sync id to primary and all copies.
*
* <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p>
*
* @see org.elasticsearch.client.Requests#flushRequest(String...)
* @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
* @see SyncedFlushResponse
*/
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {
public SyncedFlushRequest() {
}
/**
* Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
* The new request will inherit though headers and context from the original request that caused it.
*/
public SyncedFlushRequest(ActionRequest originalRequest) {
super(originalRequest);
}
/**
* Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
* be sync flushed.
*/
public SyncedFlushRequest(String... indices) {
super(indices);
}
@Override
public String toString() {
return "SyncedFlushRequest{" +
"indices=" + Arrays.toString(indices) + "}";
}
}
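
The request added above is typically built through the Requests helper named in its javadoc. A minimal usage sketch, not part of this commit's diff: the connected Client instance "client" and the index name are assumptions.

    // Illustrative only; "client" and "my_index" are assumed, not part of the commit.
    SyncedFlushRequest request = Requests.syncedFlushRequest("my_index");   // no arguments targets all indices
    SyncedFlushResponse response = client.admin().indices().syncedFlush(request).actionGet();
    System.out.println(response.successfulShards() + "/" + response.totalShards() + " shard copies sync-flushed");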


@@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;
public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) {
super(client, action, new SyncedFlushRequest());
}
public SyncedFlushRequestBuilder setIndices(String[] indices) {
super.request().indices(indices);
return this;
}
public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
super.request().indicesOptions(indicesOptions);
return this;
}
}


@@ -16,16 +16,25 @@
 * specific language governing permissions and limitations
 * under the License.
 */
-package org.elasticsearch.indices.flush;
+package org.elasticsearch.action.admin.indices.flush;
+import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
+import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap;
/**
 * The result of performing a sync flush operation on all shards of multiple indices
 */
-public class IndicesSyncedFlushResult implements ToXContent {
+public class SyncedFlushResponse extends ActionResponse implements ToXContent {
-final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
-final ShardCounts shardCounts;
+Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
+ShardCounts shardCounts;
+
+SyncedFlushResponse() {
+}
-public IndicesSyncedFlushResult(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
+public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
// shardsResultPerIndex is never modified after it is passed to this
// constructor so this is safe even though shardsResultPerIndex is a
// ConcurrentHashMap
@@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
}
-/** total number shards, including replicas, both assigned and unassigned */
+/**
+ * total number shards, including replicas, both assigned and unassigned
+ */
public int totalShards() {
return shardCounts.total;
}
-/** total number of shards for which the operation failed */
+/**
+ * total number of shards for which the operation failed
+ */
public int failedShards() {
return shardCounts.failed;
}
-/** total number of shards which were successfully sync-flushed */
+/**
+ * total number of shards which were successfully sync-flushed
+ */
public int successfulShards() {
return shardCounts.successful;
}
@@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.endObject();
continue;
}
-Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards = shardResults.failedShards();
-for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardEntry : failedShards.entrySet()) {
+Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
+for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
builder.startObject();
builder.field(Fields.SHARD, shardResults.shardId().id());
builder.field(Fields.REASON, shardEntry.getValue().failureReason());
@@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent {
return new ShardCounts(total, successful, failed);
}
-static final class ShardCounts implements ToXContent {
+static final class ShardCounts implements ToXContent, Streamable {
-public final int total;
-public final int successful;
-public final int failed;
+public int total;
+public int successful;
+public int failed;
ShardCounts(int total, int successful, int failed) {
this.total = total;
@@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.failed = failed;
}
+
+ShardCounts() {
+}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOTAL, total);
@@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.field(Fields.FAILED, failed);
return builder;
}
+
+@Override
+public void readFrom(StreamInput in) throws IOException {
+total = in.readInt();
+successful = in.readInt();
+failed = in.readInt();
+}
+
+@Override
+public void writeTo(StreamOutput out) throws IOException {
+out.writeInt(total);
+out.writeInt(successful);
+out.writeInt(failed);
+}
}
static final class Fields {
@@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent {
static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
static final XContentBuilderString REASON = new XContentBuilderString("reason");
}
+
+@Override
+public void readFrom(StreamInput in) throws IOException {
+super.readFrom(in);
+shardCounts = new ShardCounts();
+shardCounts.readFrom(in);
+Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
+int numShardsResults = in.readInt();
+for (int i =0 ; i< numShardsResults; i++) {
+String index = in.readString();
+List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
+int numShards = in.readInt();
+for (int j =0; j< numShards; j++) {
+shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in));
+}
+tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
+}
+shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
+}
+
+@Override
+public void writeTo(StreamOutput out) throws IOException {
+super.writeTo(out);
+shardCounts.writeTo(out);
+out.writeInt(shardsResultPerIndex.size());
+for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
+out.writeString(entry.getKey());
+out.writeInt(entry.getValue().size());
+for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
+shardsSyncedFlushResult.writeTo(out);
+}
+}
+}
}


@@ -0,0 +1,52 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
/**
* Synced flush Action.
*/
public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {
SyncedFlushService syncedFlushService;
@Inject
public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SyncedFlushService syncedFlushService) {
super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new);
this.syncedFlushService = syncedFlushService;
}
@Override
protected void doExecute(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
}
}


@@ -19,8 +19,16 @@
package org.elasticsearch.action.support.broadcast.node;
-import org.elasticsearch.action.*;
-import org.elasticsearch.action.support.*;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.NodeShouldNotConnectException;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
@@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
e.setIndex(shardRouting.getIndex());
e.setShard(shardRouting.shardId());
shardResults[shardIndex] = e;
-logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
+if (TransportActions.isShardNotAvailableException(t)) {
+if (logger.isTraceEnabled()) {
+logger.trace("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+}
+} else {
+if (logger.isDebugEnabled()) {
+logger.debug("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+}
+}
}
}
}


@@ -300,11 +300,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
public void onFailure(Throwable t) {
if (t instanceof RetryOnReplicaException) {
-logger.trace("Retrying operation on replica, action [{}], request [{}]", t, actionName, request);
+logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
-threadPool.executor(executor).execute(AsyncReplicaAction.this);
+// Forking a thread on local node via transport service so that custom transport service have an
+// opportunity to execute custom logic before the replica operation begins
+String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
+TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
+transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
}
@Override


@@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
@@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
@@ -390,6 +393,29 @@ public interface IndicesAdminClient extends ElasticsearchClient {
 */
FlushRequestBuilder prepareFlush(String... indices);
+
+/**
+ * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+ *
+ * @param request The sync flush request
+ * @return A result future
+ * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+ */
+ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);
+
+/**
+ * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+ *
+ * @param request The sync flush request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+ */
+void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);
+
+/**
+ * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+ */
+SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);
/**
 * Explicitly force merge one or more indices into a the number of segments.
 *
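
The three methods above mirror the existing flush API: a future-returning variant, a callback variant, and a request builder. A hedged sketch of the callback variant, assuming a connected "client" and a "logger" exist in the calling code (neither is part of this commit):

    // Sketch only; "client" and "logger" are assumptions.
    client.admin().indices().syncedFlush(Requests.syncedFlushRequest("my_index"), new ActionListener<SyncedFlushResponse>() {
        @Override
        public void onResponse(SyncedFlushResponse response) {
            logger.info("synced flush finished with {} failed shard copies", response.failedShards());
        }

        @Override
        public void onFailure(Throwable e) {
            logger.warn("synced flush failed", e);
        }
    });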


@@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
@@ -131,7 +132,7 @@ public class Requests {
public static SuggestRequest suggestRequest(String... indices) {
return new SuggestRequest(indices);
}
/**
 * Creates a search request against one or more indices. Note, the search source must be set either using the
 * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.
@@ -265,6 +266,17 @@ public class Requests {
return new FlushRequest(indices);
}
+
+/**
+ * Creates a synced flush indices request.
+ *
+ * @param indices The indices to sync flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The synced flush request
+ * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
+ */
+public static SyncedFlushRequest syncedFlushRequest(String... indices) {
+return new SyncedFlushRequest(indices);
+}
/**
 * Creates a force merge request.
 *
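
As the javadoc above notes, passing no index names (or _all) targets every index; the builder variant added in this commit gives the same result in one chained call. A small sketch under the same assumption of an existing "client":

    // Sketch only: sync-flush everything, e.g. ahead of a planned full-cluster restart.
    SyncedFlushResponse all = client.admin().indices().prepareSyncedFlush().get();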


@@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
@@ -1315,6 +1319,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices);
}
+
+@Override
+public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
+return execute(SyncedFlushAction.INSTANCE, request);
+}
+
+@Override
+public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
+execute(SyncedFlushAction.INSTANCE, request, listener);
+}
+
+@Override
+public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
+return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
+}
@Override
public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
execute(GetMappingsAction.INSTANCE, request, listener);


@@ -20,7 +20,12 @@
package org.elasticsearch.cluster.action.shard;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTaskConfig;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
+import org.elasticsearch.cluster.ClusterStateTaskListener;
+import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;
@@ -37,19 +42,24 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.EmptyTransportResponseHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Locale;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
-/**
- *
- */
public class ShardStateAction extends AbstractComponent {
public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";
@@ -97,18 +107,100 @@
options = TransportRequestOptions.builder().withTimeout(timeout).build();
}
transportService.sendRequest(masterNode,
SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleResponse(TransportResponse.Empty response) {
listener.onSuccess();
}
@Override
public void handleException(TransportException exp) {
-logger.warn("failed to send failed shard to {}", exp, masterNode);
+logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry);
listener.onShardFailedFailure(masterNode, exp);
}
});
}
+
+private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+@Override
+public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+handleShardFailureOnMaster(request, new ClusterStateTaskListener() {
+@Override
+public void onFailure(String source, Throwable t) {
+logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting);
+try {
+channel.sendResponse(t);
+} catch (Throwable channelThrowable) {
+logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting);
+}
+}
+
+@Override
+public void onNoLongerMaster(String source) {
+logger.error("no longer master while failing shard [{}]", request.shardRouting);
+try {
+channel.sendResponse(new NotMasterException(source));
+} catch (Throwable channelThrowable) {
+logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting);
+}
+}
+
+@Override
+public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+try {
+int numberOfUnassignedShards = newState.getRoutingNodes().unassigned().size();
+if (oldState != newState && numberOfUnassignedShards > 0) {
+String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shard [%s]", numberOfUnassignedShards, request.shardRouting);
+if (logger.isTraceEnabled()) {
+logger.trace(reason + ", scheduling a reroute");
+}
+routingService.reroute(reason);
+}
+} finally {
+try {
+channel.sendResponse(TransportResponse.Empty.INSTANCE);
+} catch (Throwable channelThrowable) {
+logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting);
+}
+}
+}
+}
+);
+}
+}
+
+class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry> {
+@Override
+public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
+BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
+List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>(tasks.size());
+for (ShardRoutingEntry task : tasks) {
+failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
+}
+ClusterState maybeUpdatedState = currentState;
+try {
+RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards);
+if (result.changed()) {
+maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
+}
+batchResultBuilder.successes(tasks);
+} catch (Throwable t) {
+batchResultBuilder.failures(tasks, t);
+}
+return batchResultBuilder.build(maybeUpdatedState);
+}
+}
+
+private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
+
+private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) {
+logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
+clusterService.submitStateUpdateTask(
+"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
+shardRoutingEntry,
+ClusterStateTaskConfig.build(Priority.HIGH),
+shardFailedClusterStateHandler,
+listener);
+}
public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) {
@@ -124,74 +216,20 @@
ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
transportService.sendRequest(masterNode,
SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleException(TransportException exp) {
logger.warn("failed to send shard started to [{}]", exp, masterNode);
}
});
}
-
-private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
-
-private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
-logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
-clusterService.submitStateUpdateTask(
-"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
-shardRoutingEntry,
-ClusterStateTaskConfig.build(Priority.HIGH),
-shardFailedClusterStateHandler,
-shardFailedClusterStateHandler);
-}
-
-class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
-@Override
-public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
-BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
-List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
-for (ShardRoutingEntry task : tasks) {
-shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
-}
-ClusterState maybeUpdatedState = currentState;
-try {
-RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
-if (result.changed()) {
-maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
-}
-batchResultBuilder.successes(tasks);
-} catch (Throwable t) {
-batchResultBuilder.failures(tasks, t);
-}
-return batchResultBuilder.build(maybeUpdatedState);
-}
-
-@Override
-public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
-logger.trace("unassigned shards after shard failures. scheduling a reroute.");
-routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
-}
-}
-
-@Override
-public void onFailure(String source, Throwable t) {
-logger.error("unexpected failure during [{}]", t, source);
-}
-}
-
-private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
-new ShardStartedClusterStateHandler();
-
-private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
-logger.debug("received shard started for {}", shardRoutingEntry);
-
-clusterService.submitStateUpdateTask(
-"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
-shardRoutingEntry,
-ClusterStateTaskConfig.build(Priority.URGENT),
-shardStartedClusterStateHandler,
-shardStartedClusterStateHandler);
-}
+
+class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+@Override
+public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+handleShardStartedOnMaster(request);
+channel.sendResponse(TransportResponse.Empty.INSTANCE);
+}
+}
class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
@@ -223,26 +261,20 @@
}
}
-private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
-@Override
-public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
-handleShardFailureOnMaster(request);
-channel.sendResponse(TransportResponse.Empty.INSTANCE);
-}
-}
-
-class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
-@Override
-public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
-shardStartedOnMaster(request);
-channel.sendResponse(TransportResponse.Empty.INSTANCE);
-}
-}
+private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler();
+
+private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
+logger.debug("received shard started for {}", shardRoutingEntry);
+
+clusterService.submitStateUpdateTask(
+"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
+shardRoutingEntry,
+ClusterStateTaskConfig.build(Priority.URGENT),
+shardStartedClusterStateHandler,
+shardStartedClusterStateHandler);
+}
public static class ShardRoutingEntry extends TransportRequest {
ShardRouting shardRouting;
String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
String message;
@@ -283,8 +315,13 @@
}
public interface Listener {
-default void onSuccess() {}
-default void onShardFailedNoMaster() {}
-default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {}
+default void onSuccess() {
+}
+
+default void onShardFailedNoMaster() {
+}
+
+default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {
+}
}
}


@@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.percolator.PercolatorService;
@@ -251,11 +250,8 @@ public class MetaDataMappingService extends AbstractComponent {
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
if (existingMapper != null) {
// first, simulate
-MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
-// if we have conflicts, throw an exception
-if (mergeResult.hasConflicts()) {
-throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
-}
+// this will just throw exceptions in case of problems
+existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
} else {
// TODO: can we find a better place for this validation?
// The reason this validation is here is that the mapper service doesn't learn about


@@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -31,7 +30,14 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
import java.util.function.Predicate;
/**
@@ -78,7 +84,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
// fill in the nodeToShards with the "live" nodes
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
-nodesToShards.put(cursor.value.id(), new ArrayList<ShardRouting>());
+nodesToShards.put(cursor.value.id(), new ArrayList<>());
}
// fill in the inverse of node -> shards allocated
@@ -91,21 +97,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// by the ShardId, as this is common for primary and replicas.
// A replica Set might have one (and not more) replicas with the state of RELOCATING.
if (shard.assignedToNode()) {
-List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId());
-if (entries == null) {
-entries = new ArrayList<>();
-nodesToShards.put(shard.currentNodeId(), entries);
-}
+List<ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>());
final ShardRouting sr = getRouting(shard, readOnly);
entries.add(sr);
assignedShardsAdd(sr);
if (shard.relocating()) {
-entries = nodesToShards.get(shard.relocatingNodeId());
relocatingShards++;
-if (entries == null) {
-entries = new ArrayList<>();
-nodesToShards.put(shard.relocatingNodeId(), entries);
-}
+entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>());
// add the counterpart shard with relocatingNodeId reflecting the source from which
// it's relocating from.
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();
@@ -121,7 +119,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
inactiveShardCount++;
}
} else {
final ShardRouting sr = getRouting(shard, readOnly);
assignedShardsAdd(sr);
unassignedShards.add(sr);
}
@@ -449,12 +447,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// no unassigned
return;
}
-List<ShardRouting> shards = assignedShards.get(shard.shardId());
-if (shards == null) {
-shards = new ArrayList<>();
-assignedShards.put(shard.shardId(), shards);
-}
-assert assertInstanceNotInList(shard, shards);
+List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>());
+assert assertInstanceNotInList(shard, shards);
shards.add(shard);
}


@ -20,8 +20,19 @@
package org.elasticsearch.cluster.service; package org.elasticsearch.cluster.service;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterState.Builder; import org.elasticsearch.cluster.ClusterState.Builder;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.TimeoutClusterStateListener;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
@ -44,15 +55,33 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.*; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.*; import java.util.ArrayList;
import java.util.concurrent.*; import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -292,6 +321,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
if (config.timeout() != null) { if (config.timeout() != null) {
updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> { updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
if (updateTask.processed.getAndSet(true) == false) { if (updateTask.processed.getAndSet(true) == false) {
logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout());
listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
}})); }}));
} else { } else {
@ -413,6 +443,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
assert batchResult.executionResults != null; assert batchResult.executionResults != null;
assert batchResult.executionResults.size() == toExecute.size()
: String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size());
boolean assertsEnabled = false;
assert (assertsEnabled = true);
if (assertsEnabled) {
for (UpdateTask<T> updateTask : toExecute) {
assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]";
}
}
ClusterState newClusterState = batchResult.resultingState; ClusterState newClusterState = batchResult.resultingState;
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>(); final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
@ -421,7 +460,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString(); assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
final ClusterStateTaskExecutor.TaskResult executionResult = final ClusterStateTaskExecutor.TaskResult executionResult =
batchResult.executionResults.get(updateTask.task); batchResult.executionResults.get(updateTask.task);
executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); executionResult.handle(
() -> proccessedListeners.add(updateTask),
ex -> {
logger.debug("cluster state update task [{}] failed", ex, updateTask.source);
updateTask.listener.onFailure(updateTask.source, ex);
}
);
} }
if (previousClusterState == newClusterState) { if (previousClusterState == newClusterState) {
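The boolean assertsEnabled = false; assert (assertsEnabled = true); pair added above is the standard Java idiom for detecting at runtime whether assertions are enabled, so the per-task result check only runs under -ea. A minimal, self-contained sketch of the same idiom; the class and method names here are illustrative and not part of this change:

    public final class AssertionsProbe {

        /** Returns true only when the JVM was started with assertions enabled (-ea). */
        public static boolean assertionsEnabled() {
            boolean enabled = false;
            // The assignment inside the assert only executes when assertions are on;
            // with assertions disabled the whole statement is a no-op.
            assert (enabled = true);
            return enabled;
        }

        public static void main(String[] args) {
            System.out.println(assertionsEnabled()
                ? "assertions enabled: running extra consistency checks"
                : "assertions disabled: skipping extra checks");
        }
    }

Running with java -ea AssertionsProbe prints the first message; plain java AssertionsProbe prints the second.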

View File

@ -53,7 +53,7 @@ import java.util.Map;
*/ */
public final class XContentBuilder implements BytesStream, Releasable { public final class XContentBuilder implements BytesStream, Releasable {
public static enum FieldCaseConversion { public enum FieldCaseConversion {
/** /**
* No conversion will occur. * No conversion will occur.
*/ */
@ -251,14 +251,7 @@ public final class XContentBuilder implements BytesStream, Releasable {
} }
public XContentBuilder field(XContentBuilderString name) throws IOException { public XContentBuilder field(XContentBuilderString name) throws IOException {
if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { return field(name, fieldCaseConversion);
generator.writeFieldName(name.underscore());
} else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
generator.writeFieldName(name.camelCase());
} else {
generator.writeFieldName(name.underscore());
}
return this;
} }
public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException { public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException {
@ -273,22 +266,13 @@ public final class XContentBuilder implements BytesStream, Releasable {
} }
public XContentBuilder field(String name) throws IOException { public XContentBuilder field(String name) throws IOException {
if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { return field(name, fieldCaseConversion);
if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder();
}
name = Strings.toUnderscoreCase(name, cachedStringBuilder);
} else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder();
}
name = Strings.toCamelCase(name, cachedStringBuilder);
}
generator.writeFieldName(name);
return this;
} }
public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException { public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException {
if (name == null) {
throw new IllegalArgumentException("field name cannot be null");
}
if (conversion == FieldCaseConversion.UNDERSCORE) { if (conversion == FieldCaseConversion.UNDERSCORE) {
if (cachedStringBuilder == null) { if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder(); cachedStringBuilder = new StringBuilder();
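The XContentBuilder change above removes the duplicated case-conversion branches: the field(name) overloads now delegate to the variant taking an explicit FieldCaseConversion, which also gained a null check on the field name. A rough sketch of that shape, assuming a simplified stand-in class rather than the real builder, which writes through an XContentGenerator:

    import java.util.Locale;

    final class FieldNameWriter {
        enum FieldCaseConversion { NONE, UNDERSCORE, CAMELCASE }

        private final FieldCaseConversion defaultConversion;

        FieldNameWriter(FieldCaseConversion defaultConversion) {
            this.defaultConversion = defaultConversion;
        }

        // Convenience overload: funnels into the explicit-conversion variant,
        // so validation and branching live in exactly one place.
        String writeFieldName(String name) {
            return writeFieldName(name, defaultConversion);
        }

        String writeFieldName(String name, FieldCaseConversion conversion) {
            if (name == null) {
                throw new IllegalArgumentException("field name cannot be null");
            }
            switch (conversion) {
                case UNDERSCORE:
                    return name.replaceAll("([a-z])([A-Z])", "$1_$2").toLowerCase(Locale.ROOT);
                case CAMELCASE: {
                    StringBuilder sb = new StringBuilder();
                    boolean upper = false;
                    for (char c : name.toCharArray()) {
                        if (c == '_') { upper = true; continue; }
                        sb.append(upper ? Character.toUpperCase(c) : c);
                        upper = false;
                    }
                    return sb.toString();
                }
                default:
                    return name;
            }
        }
    }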

View File

@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
@ -48,7 +47,13 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.*; import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShadowIndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store;
@ -73,7 +78,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
/** /**
* *
*/ */
public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard>{ public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> {
private final IndexEventListener eventListener; private final IndexEventListener eventListener;
private final AnalysisService analysisService; private final AnalysisService analysisService;
@ -93,7 +98,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
private final AtomicBoolean deleted = new AtomicBoolean(false); private final AtomicBoolean deleted = new AtomicBoolean(false);
private final IndexSettings indexSettings; private final IndexSettings indexSettings;
@Inject
public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
SimilarityService similarityService, SimilarityService similarityService,
ShardStoreDeleter shardStoreDeleter, ShardStoreDeleter shardStoreDeleter,
@ -146,7 +150,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
*/ */
@Nullable @Nullable
public IndexShard getShardOrNull(int shardId) { public IndexShard getShardOrNull(int shardId) {
return shards.get(shardId); return shards.get(shardId);
} }
/** /**
@ -160,13 +164,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
return indexShard; return indexShard;
} }
public Set<Integer> shardIds() { return shards.keySet(); } public Set<Integer> shardIds() {
return shards.keySet();
}
public IndexCache cache() { public IndexCache cache() {
return indexCache; return indexCache;
} }
public IndexFieldDataService fieldData() { return indexFieldData; } public IndexFieldDataService fieldData() {
return indexFieldData;
}
public AnalysisService analysisService() { public AnalysisService analysisService() {
return this.analysisService; return this.analysisService;
@ -207,7 +215,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
private long getAvgShardSizeInBytes() throws IOException { private long getAvgShardSizeInBytes() throws IOException {
long sum = 0; long sum = 0;
int count = 0; int count = 0;
for(IndexShard indexShard : this) { for (IndexShard indexShard : this) {
sum += indexShard.store().stats().sizeInBytes(); sum += indexShard.store().stats().sizeInBytes();
count++; count++;
} }
@ -254,17 +262,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
// TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
// that's being relocated/replicated we know how large it will become once it's done copying: // that's being relocated/replicated we know how large it will become once it's done copying:
// Count up how many shards are currently on each data path: // Count up how many shards are currently on each data path:
Map<Path,Integer> dataPathToShardCount = new HashMap<>(); Map<Path, Integer> dataPathToShardCount = new HashMap<>();
for(IndexShard shard : this) { for (IndexShard shard : this) {
Path dataPath = shard.shardPath().getRootStatePath(); Path dataPath = shard.shardPath().getRootStatePath();
Integer curCount = dataPathToShardCount.get(dataPath); Integer curCount = dataPathToShardCount.get(dataPath);
if (curCount == null) { if (curCount == null) {
curCount = 0; curCount = 0;
} }
dataPathToShardCount.put(dataPath, curCount+1); dataPathToShardCount.put(dataPath, curCount + 1);
} }
path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(),
dataPathToShardCount); dataPathToShardCount);
logger.debug("{} creating using a new path [{}]", shardId, path); logger.debug("{} creating using a new path [{}]", shardId, path);
} else { } else {
logger.debug("{} creating using an existing path [{}]", shardId, path); logger.debug("{} creating using an existing path [{}]", shardId, path);
@ -277,7 +285,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
logger.debug("creating shard_id {}", shardId); logger.debug("creating shard_id {}", shardId);
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary. // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false || final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId))); store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
if (useShadowEngine(primary, indexSettings)) { if (useShadowEngine(primary, indexSettings)) {
indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider);
@ -462,6 +470,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
} }
} }
} }
/** /**
* Returns the filter associated with listed filtering aliases. * Returns the filter associated with listed filtering aliases.
* <p> * <p>

View File

@ -19,16 +19,9 @@
package org.elasticsearch.index.mapper; package org.elasticsearch.index.mapper;
public class ContentPath { public final class ContentPath {
public enum Type { private static final char DELIMITER = '.';
JUST_NAME,
FULL,
}
private Type pathType;
private final char delimiter;
private final StringBuilder sb; private final StringBuilder sb;
@ -47,7 +40,6 @@ public class ContentPath {
* number of path elements to not be included in {@link #pathAsText(String)}. * number of path elements to not be included in {@link #pathAsText(String)}.
*/ */
public ContentPath(int offset) { public ContentPath(int offset) {
this.delimiter = '.';
this.sb = new StringBuilder(); this.sb = new StringBuilder();
this.offset = offset; this.offset = offset;
reset(); reset();
@ -71,26 +63,11 @@ public class ContentPath {
} }
public String pathAsText(String name) { public String pathAsText(String name) {
if (pathType == Type.JUST_NAME) {
return name;
}
return fullPathAsText(name);
}
public String fullPathAsText(String name) {
sb.setLength(0); sb.setLength(0);
for (int i = offset; i < index; i++) { for (int i = offset; i < index; i++) {
sb.append(path[i]).append(delimiter); sb.append(path[i]).append(DELIMITER);
} }
sb.append(name); sb.append(name);
return sb.toString(); return sb.toString();
} }
public Type pathType() {
return pathType;
}
public void pathType(Type type) {
this.pathType = type;
}
} }
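With the JUST_NAME option removed, ContentPath.pathAsText always produces the full dotted path using a fixed '.' delimiter. A minimal sketch of the resulting behaviour with a simplified stand-in; the names below are illustrative only:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class DottedPath {
        private static final char DELIMITER = '.';
        private final Deque<String> elements = new ArrayDeque<>();

        void add(String element) { elements.addLast(element); }
        void remove()            { elements.removeLast(); }

        // Always joins every element currently on the path, then the leaf name.
        String pathAsText(String name) {
            StringBuilder sb = new StringBuilder();
            for (String element : elements) {
                sb.append(element).append(DELIMITER);
            }
            return sb.append(name).toString();
        }

        public static void main(String[] args) {
            DottedPath path = new DottedPath();
            path.add("user");
            path.add("address");
            System.out.println(path.pathAsText("zip")); // prints user.address.zip
        }
    }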

View File

@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
@ -117,7 +118,7 @@ public class DocumentMapper implements ToXContent {
private volatile CompressedXContent mappingSource; private volatile CompressedXContent mappingSource;
private final Mapping mapping; private volatile Mapping mapping;
private final DocumentParser documentParser; private final DocumentParser documentParser;
@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent {
mapperService.addMappers(type, objectMappers, fieldMappers); mapperService.addMappers(type, objectMappers, fieldMappers);
} }
public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
try (ReleasableLock lock = mappingWriteLock.acquire()) { try (ReleasableLock lock = mappingWriteLock.acquire()) {
mapperService.checkMappersCompatibility(type, mapping, updateAllTypes); mapperService.checkMappersCompatibility(type, mapping, updateAllTypes);
final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes); // do the merge even if simulate == false so that we get exceptions
this.mapping.merge(mapping, mergeResult); Mapping merged = this.mapping.merge(mapping, updateAllTypes);
if (simulate == false) { if (simulate == false) {
addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes); this.mapping = merged;
Collection<ObjectMapper> objectMappers = new ArrayList<>();
Collection<FieldMapper> fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers));
MapperUtils.collect(merged.root, objectMappers, fieldMappers);
addMappers(objectMappers, fieldMappers, updateAllTypes);
refreshSource(); refreshSource();
} }
return mergeResult;
} }
} }
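DocumentMapper.merge now always computes the merged Mapping, so incompatible mappings fail with an exception even in simulate mode, and the merged state is only published when simulate is false. A rough sketch of that simulate-aware merge-then-publish pattern, using a toy Config type rather than the real Mapping API:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    final class ConfigMerger {
        static final class Config {
            final Map<String, String> fields;
            Config(Map<String, String> fields) {
                this.fields = Collections.unmodifiableMap(fields);
            }
            // Returns a new Config; throws if an existing key would change incompatibly.
            Config merge(Config other) {
                Map<String, String> merged = new HashMap<>(fields);
                other.fields.forEach((k, v) -> {
                    String existing = merged.putIfAbsent(k, v);
                    if (existing != null && !existing.equals(v)) {
                        throw new IllegalArgumentException("conflicting value for [" + k + "]");
                    }
                });
                return new Config(merged);
            }
        }

        private volatile Config current = new Config(Collections.emptyMap());

        // Always perform the merge so conflicts throw; publish only when simulate == false.
        void merge(Config incoming, boolean simulate) {
            Config merged = current.merge(incoming);
            if (simulate == false) {
                current = merged;
            }
        }
    }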

View File

@ -234,9 +234,6 @@ class DocumentParser implements Closeable {
nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE)); nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE));
} }
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(mapper.pathType());
// if we are at the end of the previous object, advance // if we are at the end of the previous object, advance
if (token == XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.END_OBJECT) {
token = parser.nextToken(); token = parser.nextToken();
@ -267,12 +264,11 @@ class DocumentParser implements Closeable {
if (update == null) { if (update == null) {
update = newUpdate; update = newUpdate;
} else { } else {
MapperUtils.merge(update, newUpdate); update = update.merge(newUpdate, false);
} }
} }
} }
// restore the enable path flag // restore the enable path flag
context.path().pathType(origPathType);
if (nested.isNested()) { if (nested.isNested()) {
ParseContext.Document nestedDoc = context.doc(); ParseContext.Document nestedDoc = context.doc();
ParseContext.Document parentDoc = nestedDoc.getParent(); ParseContext.Document parentDoc = nestedDoc.getParent();
@ -341,7 +337,7 @@ class DocumentParser implements Closeable {
context.path().remove(); context.path().remove();
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
if (builder == null) { if (builder == null) {
builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType()); builder = MapperBuilders.object(currentFieldName).enabled(true);
// if this is a non root object, then explicitly set the dynamic behavior if set // if this is a non root object, then explicitly set the dynamic behavior if set
if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic());
@ -610,7 +606,7 @@ class DocumentParser implements Closeable {
return null; return null;
} }
final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName)); final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().pathAsText(currentFieldName));
Mapper.Builder builder = null; Mapper.Builder builder = null;
if (existingFieldType != null) { if (existingFieldType != null) {
// create a builder of the same type // create a builder of the same type
@ -695,7 +691,7 @@ class DocumentParser implements Closeable {
if (paths.length > 1) { if (paths.length > 1) {
ObjectMapper parent = context.root(); ObjectMapper parent = context.root();
for (int i = 0; i < paths.length-1; i++) { for (int i = 0; i < paths.length-1; i++) {
mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i])); mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i]));
if (mapper == null) { if (mapper == null) {
// One mapping is missing, check if we are allowed to create a dynamic one. // One mapping is missing, check if we are allowed to create a dynamic one.
ObjectMapper.Dynamic dynamic = parent.dynamic(); ObjectMapper.Dynamic dynamic = parent.dynamic();
@ -713,12 +709,12 @@ class DocumentParser implements Closeable {
if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) { if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(parent.dynamic()); ((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
} }
builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType()); builder = MapperBuilders.object(paths[i]).enabled(true);
} }
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
mapper = (ObjectMapper) builder.build(builderContext); mapper = (ObjectMapper) builder.build(builderContext);
if (mapper.nested() != ObjectMapper.Nested.NO) { if (mapper.nested() != ObjectMapper.Nested.NO) {
throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`"); throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
} }
break; break;
case FALSE: case FALSE:
@ -759,7 +755,7 @@ class DocumentParser implements Closeable {
private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException {
final Mapper update = parseObjectOrField(context, mapper); final Mapper update = parseObjectOrField(context, mapper);
if (update != null) { if (update != null) {
MapperUtils.merge(mapper, update); mapper = (M) mapper.merge(update, false);
} }
return mapper; return mapper;
} }

View File

@ -47,7 +47,7 @@ import java.util.List;
import java.util.Locale; import java.util.Locale;
import java.util.stream.StreamSupport; import java.util.stream.StreamSupport;
public abstract class FieldMapper extends Mapper { public abstract class FieldMapper extends Mapper implements Cloneable {
public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> { public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> {
@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper {
* if the fieldType has a non-null option we are all good it might have been set through a different * if the fieldType has a non-null option we are all good it might have been set through a different
* call. * call.
*/ */
final IndexOptions options = getDefaultIndexOption(); IndexOptions options = getDefaultIndexOption();
assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing"; if (options == IndexOptions.NONE) {
// can happen when an existing type on the same index has disabled indexing
// since we inherit the default field type from the first mapper that is
// created on an index
throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index");
}
fieldType.setIndexOptions(options); fieldType.setIndexOptions(options);
} }
} else { } else {
@ -202,11 +207,6 @@ public abstract class FieldMapper extends Mapper {
return this; return this;
} }
public T multiFieldPathType(ContentPath.Type pathType) {
multiFieldsBuilder.pathType(pathType);
return builder;
}
public T addMultiField(Mapper.Builder mapperBuilder) { public T addMultiField(Mapper.Builder mapperBuilder) {
multiFieldsBuilder.add(mapperBuilder); multiFieldsBuilder.add(mapperBuilder);
return builder; return builder;
@ -237,7 +237,7 @@ public abstract class FieldMapper extends Mapper {
} }
protected String buildFullName(BuilderContext context) { protected String buildFullName(BuilderContext context) {
return context.path().fullPathAsText(name); return context.path().pathAsText(name);
} }
protected void setupFieldType(BuilderContext context) { protected void setupFieldType(BuilderContext context) {
@ -270,7 +270,7 @@ public abstract class FieldMapper extends Mapper {
protected MappedFieldTypeReference fieldTypeRef; protected MappedFieldTypeReference fieldTypeRef;
protected final MappedFieldType defaultFieldType; protected final MappedFieldType defaultFieldType;
protected final MultiFields multiFields; protected MultiFields multiFields;
protected CopyTo copyTo; protected CopyTo copyTo;
protected final boolean indexCreatedBefore2x; protected final boolean indexCreatedBefore2x;
@ -359,26 +359,41 @@ public abstract class FieldMapper extends Mapper {
} }
@Override @Override
public void merge(Mapper mergeWith, MergeResult mergeResult) { protected FieldMapper clone() {
try {
return (FieldMapper) super.clone();
} catch (CloneNotSupportedException e) {
throw new AssertionError(e);
}
}
@Override
public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
FieldMapper merged = clone();
merged.doMerge(mergeWith, updateAllTypes);
return merged;
}
/**
* Merge changes coming from {@code mergeWith} in place.
* @param updateAllTypes TODO
*/
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
if (!this.getClass().equals(mergeWith.getClass())) { if (!this.getClass().equals(mergeWith.getClass())) {
String mergedType = mergeWith.getClass().getSimpleName(); String mergedType = mergeWith.getClass().getSimpleName();
if (mergeWith instanceof FieldMapper) { if (mergeWith instanceof FieldMapper) {
mergedType = ((FieldMapper) mergeWith).contentType(); mergedType = ((FieldMapper) mergeWith).contentType();
} }
mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
// different types, return
return;
} }
FieldMapper fieldMergeWith = (FieldMapper) mergeWith; FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
multiFields.merge(mergeWith, mergeResult); multiFields = multiFields.merge(fieldMergeWith.multiFields);
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { // apply changeable values
// apply changeable values MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); fieldType.freeze();
fieldType.freeze(); fieldTypeRef.set(fieldType);
fieldTypeRef.set(fieldType); this.copyTo = fieldMergeWith.copyTo;
this.copyTo = fieldMergeWith.copyTo;
}
} }
@Override @Override
@ -520,18 +535,12 @@ public abstract class FieldMapper extends Mapper {
public static class MultiFields { public static class MultiFields {
public static MultiFields empty() { public static MultiFields empty() {
return new MultiFields(ContentPath.Type.FULL, ImmutableOpenMap.<String, FieldMapper>of()); return new MultiFields(ImmutableOpenMap.<String, FieldMapper>of());
} }
public static class Builder { public static class Builder {
private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder(); private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder();
private ContentPath.Type pathType = ContentPath.Type.FULL;
public Builder pathType(ContentPath.Type pathType) {
this.pathType = pathType;
return this;
}
public Builder add(Mapper.Builder builder) { public Builder add(Mapper.Builder builder) {
mapperBuilders.put(builder.name(), builder); mapperBuilders.put(builder.name(), builder);
@ -540,13 +549,9 @@ public abstract class FieldMapper extends Mapper {
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) { public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) {
if (pathType == ContentPath.Type.FULL && mapperBuilders.isEmpty()) { if (mapperBuilders.isEmpty()) {
return empty(); return empty();
} else if (mapperBuilders.isEmpty()) {
return new MultiFields(pathType, ImmutableOpenMap.<String, FieldMapper>of());
} else { } else {
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(pathType);
context.path().add(mainFieldBuilder.name()); context.path().add(mainFieldBuilder.name());
ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders; ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders;
for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) { for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) {
@ -557,26 +562,25 @@ public abstract class FieldMapper extends Mapper {
mapperBuilders.put(key, mapper); mapperBuilders.put(key, mapper);
} }
context.path().remove(); context.path().remove();
context.path().pathType(origPathType);
ImmutableOpenMap.Builder<String, FieldMapper> mappers = mapperBuilders.cast(); ImmutableOpenMap.Builder<String, FieldMapper> mappers = mapperBuilders.cast();
return new MultiFields(pathType, mappers.build()); return new MultiFields(mappers.build());
} }
} }
} }
private final ContentPath.Type pathType; private final ImmutableOpenMap<String, FieldMapper> mappers;
private volatile ImmutableOpenMap<String, FieldMapper> mappers;
public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, FieldMapper> mappers) { private MultiFields(ImmutableOpenMap<String, FieldMapper> mappers) {
this.pathType = pathType; ImmutableOpenMap.Builder<String, FieldMapper> builder = new ImmutableOpenMap.Builder<>();
this.mappers = mappers;
// we disable the all in multi-field mappers // we disable the all in multi-field mappers
for (ObjectCursor<FieldMapper> cursor : mappers.values()) { for (ObjectObjectCursor<String, FieldMapper> cursor : mappers) {
FieldMapper mapper = cursor.value; FieldMapper mapper = cursor.value;
if (mapper instanceof AllFieldMapper.IncludeInAll) { if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
} }
builder.put(cursor.key, mapper);
} }
this.mappers = builder.build();
} }
public void parse(FieldMapper mainField, ParseContext context) throws IOException { public void parse(FieldMapper mainField, ParseContext context) throws IOException {
@ -587,58 +591,33 @@ public abstract class FieldMapper extends Mapper {
context = context.createMultiFieldContext(); context = context.createMultiFieldContext();
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(pathType);
context.path().add(mainField.simpleName()); context.path().add(mainField.simpleName());
for (ObjectCursor<FieldMapper> cursor : mappers.values()) { for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
cursor.value.parse(context); cursor.value.parse(context);
} }
context.path().remove(); context.path().remove();
context.path().pathType(origPathType);
} }
// No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge public MultiFields merge(MultiFields mergeWith) {
public void merge(Mapper mergeWith, MergeResult mergeResult) { ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = ImmutableOpenMap.builder(mappers);
FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
List<FieldMapper> newFieldMappers = null; for (ObjectCursor<FieldMapper> cursor : mergeWith.mappers.values()) {
ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = null;
for (ObjectCursor<FieldMapper> cursor : mergeWithMultiField.multiFields.mappers.values()) {
FieldMapper mergeWithMapper = cursor.value; FieldMapper mergeWithMapper = cursor.value;
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
if (mergeIntoMapper == null) { if (mergeIntoMapper == null) {
// no mapping, simply add it if not simulating // we disable the all in multi-field mappers
if (!mergeResult.simulate()) { if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
// we disable the all in multi-field mappers mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
}
if (newMappersBuilder == null) {
newMappersBuilder = ImmutableOpenMap.builder(mappers);
}
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
if (mergeWithMapper instanceof FieldMapper) {
if (newFieldMappers == null) {
newFieldMappers = new ArrayList<>(2);
}
newFieldMappers.add(mergeWithMapper);
}
} }
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
} else { } else {
mergeIntoMapper.merge(mergeWithMapper, mergeResult); FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
newMappersBuilder.put(merged.simpleName(), merged); // override previous definition
} }
} }
// first add all field mappers ImmutableOpenMap<String, FieldMapper> mappers = newMappersBuilder.build();
if (newFieldMappers != null) { return new MultiFields(mappers);
mergeResult.addFieldMappers(newFieldMappers);
}
// now publish mappers
if (newMappersBuilder != null) {
mappers = newMappersBuilder.build();
}
} }
public Iterator<Mapper> iterator() { public Iterator<Mapper> iterator() {
@ -646,9 +625,6 @@ public abstract class FieldMapper extends Mapper {
} }
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (pathType != ContentPath.Type.FULL) {
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
}
if (!mappers.isEmpty()) { if (!mappers.isEmpty()) {
// sort the mappers so we get consistent serialization format // sort the mappers so we get consistent serialization format
Mapper[] sortedMappers = mappers.values().toArray(Mapper.class); Mapper[] sortedMappers = mappers.values().toArray(Mapper.class);
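FieldMapper.merge now follows a copy-on-write pattern: clone the current mapper, apply the changes from mergeWith onto the clone in doMerge, and return the clone, leaving both inputs untouched. A minimal sketch of the pattern with a hypothetical Setting class that is not part of this change:

    class Setting implements Cloneable {
        private String value;
        private String copyTo;

        Setting(String value, String copyTo) {
            this.value = value;
            this.copyTo = copyTo;
        }

        @Override
        protected Setting clone() {
            try {
                return (Setting) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e); // cannot happen, we implement Cloneable
            }
        }

        // Returns a merged copy; neither this nor mergeWith is modified.
        Setting merge(Setting mergeWith) {
            if (getClass().equals(mergeWith.getClass()) == false) {
                throw new IllegalArgumentException("cannot merge different types");
            }
            Setting merged = clone();
            merged.doMerge(mergeWith);
            return merged;
        }

        // Applies the changeable values from mergeWith; only ever called on a fresh clone.
        protected void doMerge(Setting mergeWith) {
            this.value = mergeWith.value;
            this.copyTo = mergeWith.copyTo;
        }
    }

The same shape shows up again below for MultiFields.merge, NumberFieldMapper and the other concrete mappers, which now return merged copies instead of reporting conflicts through a MergeResult.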

View File

@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
/** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */ /** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
public abstract String name(); public abstract String name();
public abstract void merge(Mapper mergeWith, MergeResult mergeResult); /** Return the merge of {@code mergeWith} into this.
* Both {@code this} and {@code mergeWith} will be left unmodified. */
public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes);
} }

View File

@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.Queries;
@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock()); private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock());
private volatile FieldTypeLookup fieldTypes; private volatile FieldTypeLookup fieldTypes;
private volatile ImmutableOpenMap<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.of(); private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
private boolean hasNested = false; // updated dynamically to true when a nested object is added private boolean hasNested = false; // updated dynamically to true when a nested object is added
private final DocumentMapperParser documentParser; private final DocumentMapperParser documentParser;
@ -251,14 +250,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
DocumentMapper oldMapper = mappers.get(mapper.type()); DocumentMapper oldMapper = mappers.get(mapper.type());
if (oldMapper != null) { if (oldMapper != null) {
// simulate first oldMapper.merge(mapper.mapping(), false, updateAllTypes);
MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
if (result.hasConflicts()) {
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}");
}
// then apply for real
result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
assert result.hasConflicts() == false; // we already simulated
return oldMapper; return oldMapper;
} else { } else {
Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility( Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
@ -300,19 +292,56 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
return true; return true;
} }
private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
final Set<String> objectFullNames = new HashSet<>();
for (ObjectMapper objectMapper : objectMappers) {
final String fullPath = objectMapper.fullPath();
if (objectFullNames.add(fullPath) == false) {
throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
}
}
if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) {
// Before 3.0 some metadata mappers are also registered under the root object mapper
// So we avoid false positives by deduplicating mappers
// given that we check exact equality, this would still catch the case that a mapper
// is defined under the root object
Collection<FieldMapper> uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>());
uniqueFieldMappers.addAll(fieldMappers);
fieldMappers = uniqueFieldMappers;
}
final Set<String> fieldNames = new HashSet<>();
for (FieldMapper fieldMapper : fieldMappers) {
final String name = fieldMapper.name();
if (objectFullNames.contains(name)) {
throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]");
} else if (fieldNames.add(name) == false) {
throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]");
}
}
}
protected void checkMappersCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) { protected void checkMappersCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
assert mappingLock.isWriteLockedByCurrentThread(); assert mappingLock.isWriteLockedByCurrentThread();
checkFieldUniqueness(type, objectMappers, fieldMappers);
for (ObjectMapper newObjectMapper : objectMappers) { for (ObjectMapper newObjectMapper : objectMappers) {
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
if (existingObjectMapper != null) { if (existingObjectMapper != null) {
MergeResult result = new MergeResult(true, updateAllTypes); // simulate a merge and ignore the result, we are just interested
existingObjectMapper.merge(newObjectMapper, result); // in exceptions here
if (result.hasConflicts()) { existingObjectMapper.merge(newObjectMapper, updateAllTypes);
throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" +
Arrays.toString(result.buildConflicts()));
}
} }
} }
for (FieldMapper fieldMapper : fieldMappers) {
if (fullPathObjectMappers.containsKey(fieldMapper.name())) {
throw new IllegalArgumentException("Field [" + fieldMapper.name() + "] is defined as a field in mapping [" + type + "] but this name is already used for an object in other types");
}
}
fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes); fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes);
} }
@ -320,9 +349,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
String type, Mapping mapping, boolean updateAllTypes) { String type, Mapping mapping, boolean updateAllTypes) {
List<ObjectMapper> objectMappers = new ArrayList<>(); List<ObjectMapper> objectMappers = new ArrayList<>();
List<FieldMapper> fieldMappers = new ArrayList<>(); List<FieldMapper> fieldMappers = new ArrayList<>();
for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { Collections.addAll(fieldMappers, mapping.metadataMappers);
fieldMappers.add(metadataMapper);
}
MapperUtils.collect(mapping.root, objectMappers, fieldMappers); MapperUtils.collect(mapping.root, objectMappers, fieldMappers);
checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes); checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes);
return new Tuple<>(objectMappers, fieldMappers); return new Tuple<>(objectMappers, fieldMappers);
@ -330,14 +357,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) { protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
assert mappingLock.isWriteLockedByCurrentThread(); assert mappingLock.isWriteLockedByCurrentThread();
ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers); Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
for (ObjectMapper objectMapper : objectMappers) { for (ObjectMapper objectMapper : objectMappers) {
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper); fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) { if (objectMapper.nested().isNested()) {
hasNested = true; hasNested = true;
} }
} }
this.fullPathObjectMappers = fullPathObjectMappers.build(); this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers); this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers);
} }
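The new checkFieldUniqueness above rejects mappings that define the same object path twice, the same field name twice, or a name used both as an object and as a field, using plain hash sets. A compact sketch of the same checks over bare name collections; the helper class below is hypothetical:

    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Set;

    final class NameUniquenessCheck {
        static void check(Collection<String> objectPaths, Collection<String> fieldNames) {
            Set<String> objects = new HashSet<>();
            for (String path : objectPaths) {
                if (objects.add(path) == false) {
                    throw new IllegalArgumentException("Object mapper [" + path + "] is defined twice");
                }
            }
            Set<String> fields = new HashSet<>();
            for (String name : fieldNames) {
                if (objects.contains(name)) {
                    throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field");
                } else if (fields.add(name) == false) {
                    throw new IllegalArgumentException("Field [" + name + "] is defined twice");
                }
            }
        }
    }

The pre-3.0 branch in the real code additionally deduplicates metadata mappers through an identity set before the field-name pass, since older indices also register them under the root object.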

View File

@ -27,52 +27,6 @@ import java.util.Collection;
public enum MapperUtils { public enum MapperUtils {
; ;
private static MergeResult newStrictMergeResult() {
return new MergeResult(false, false) {
@Override
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
// no-op
}
@Override
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
// no-op
}
@Override
public Collection<FieldMapper> getNewFieldMappers() {
throw new UnsupportedOperationException("Strict merge result does not support new field mappers");
}
@Override
public Collection<ObjectMapper> getNewObjectMappers() {
throw new UnsupportedOperationException("Strict merge result does not support new object mappers");
}
@Override
public void addConflict(String mergeFailure) {
throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure);
}
};
}
/**
* Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
* merges mappings, not lookup structures. Conflicts are returned as exceptions.
*/
public static void merge(Mapper mergeInto, Mapper mergeWith) {
mergeInto.merge(mergeWith, newStrictMergeResult());
}
/**
* Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
* merges mappings, not lookup structures. Conflicts are returned as exceptions.
*/
public static void merge(Mapping mergeInto, Mapping mergeWith) {
mergeInto.merge(mergeWith, newStrictMergeResult());
}
/** Split mapper and its descendants into object and field mappers. */ /** Split mapper and its descendants into object and field mappers. */
public static void collect(Mapper mapper, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) { public static void collect(Mapper mapper, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
if (mapper instanceof RootObjectMapper) { if (mapper instanceof RootObjectMapper) {

View File

@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.HashSet;
import java.util.Map; import java.util.Map;
import java.util.Set;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap; import static java.util.Collections.unmodifiableMap;
@ -41,25 +43,27 @@ import static java.util.Collections.unmodifiableMap;
*/ */
public final class Mapping implements ToXContent { public final class Mapping implements ToXContent {
public static final List<String> LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl"); // Set of fields that were included into the root object mapper before 2.0
public static final Set<String> LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>(
Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl")));
final Version indexCreated; final Version indexCreated;
final RootObjectMapper root; final RootObjectMapper root;
final MetadataFieldMapper[] metadataMappers; final MetadataFieldMapper[] metadataMappers;
final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap; final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap;
volatile Map<String, Object> meta; final Map<String, Object> meta;
public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) { public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) {
this.indexCreated = indexCreated; this.indexCreated = indexCreated;
this.root = rootObjectMapper;
this.metadataMappers = metadataMappers; this.metadataMappers = metadataMappers;
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>();
for (MetadataFieldMapper metadataMapper : metadataMappers) { for (MetadataFieldMapper metadataMapper : metadataMappers) {
if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) { if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) {
root.putMapper(metadataMapper); rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper);
} }
metadataMappersMap.put(metadataMapper.getClass(), metadataMapper); metadataMappersMap.put(metadataMapper.getClass(), metadataMapper);
} }
this.root = rootObjectMapper;
// keep root mappers sorted for consistent serialization // keep root mappers sorted for consistent serialization
Arrays.sort(metadataMappers, new Comparator<Mapper>() { Arrays.sort(metadataMappers, new Comparator<Mapper>() {
@Override @Override
@ -90,21 +94,20 @@ public final class Mapping implements ToXContent {
} }
/** @see DocumentMapper#merge(Mapping, boolean, boolean) */ /** @see DocumentMapper#merge(Mapping, boolean, boolean) */
public void merge(Mapping mergeWith, MergeResult mergeResult) { public Mapping merge(Mapping mergeWith, boolean updateAllTypes) {
assert metadataMappers.length == mergeWith.metadataMappers.length; RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes);
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap);
root.merge(mergeWith.root, mergeResult); for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) {
for (MetadataFieldMapper metadataMapper : metadataMappers) { MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass());
MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass()); MetadataFieldMapper merged;
if (mergeWithMetadataMapper != null) { if (mergeInto == null) {
metadataMapper.merge(mergeWithMetadataMapper, mergeResult); merged = metaMergeWith;
} else {
merged = mergeInto.merge(metaMergeWith, updateAllTypes);
} }
mergedMetaDataMappers.put(merged.getClass(), merged);
} }
return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta);
if (mergeResult.simulate() == false) {
// let the merge with attributes to override the attributes
meta = mergeWith.meta;
}
} }
@Override @Override

View File

@ -1,81 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/** A container for tracking results of a mapping merge. */
public class MergeResult {
private final boolean simulate;
private final boolean updateAllTypes;
private final List<String> conflicts = new ArrayList<>();
private final List<FieldMapper> newFieldMappers = new ArrayList<>();
private final List<ObjectMapper> newObjectMappers = new ArrayList<>();
public MergeResult(boolean simulate, boolean updateAllTypes) {
this.simulate = simulate;
this.updateAllTypes = updateAllTypes;
}
public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
assert simulate() == false;
newFieldMappers.addAll(fieldMappers);
}
public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
assert simulate() == false;
newObjectMappers.addAll(objectMappers);
}
public Collection<FieldMapper> getNewFieldMappers() {
return newFieldMappers;
}
public Collection<ObjectMapper> getNewObjectMappers() {
return newObjectMappers;
}
public boolean simulate() {
return simulate;
}
public boolean updateAllTypes() {
return updateAllTypes;
}
public void addConflict(String mergeFailure) {
conflicts.add(mergeFailure);
}
public boolean hasConflicts() {
return conflicts.isEmpty() == false;
}
public String[] buildConflicts() {
return conflicts.toArray(Strings.EMPTY_ARRAY);
}
}

View File

@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
*/ */
public abstract void postParse(ParseContext context) throws IOException; public abstract void postParse(ParseContext context) throws IOException;
@Override
public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes);
}
} }

View File

@ -595,7 +595,7 @@ public abstract class ParseContext {
if (dynamicMappingsUpdate == null) { if (dynamicMappingsUpdate == null) {
dynamicMappingsUpdate = mapper; dynamicMappingsUpdate = mapper;
} else { } else {
MapperUtils.merge(dynamicMappingsUpdate, mapper); dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false);
} }
} }

View File

@ -128,7 +128,7 @@ public class ParsedDocument {
if (dynamicMappingsUpdate == null) { if (dynamicMappingsUpdate == null) {
dynamicMappingsUpdate = update; dynamicMappingsUpdate = update;
} else { } else {
MapperUtils.merge(dynamicMappingsUpdate, update); dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false);
} }
} }

View File

@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
setupFieldType(context); setupFieldType(context);
ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll); return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll);
return fieldMapper;
} }
@Override @Override

View File

@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
} }
@Override @Override
public void merge(Mapper mergeWith, MergeResult mergeResult) { protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.merge(mergeWith, mergeResult); super.doMerge(mergeWith, updateAllTypes);
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
if (!mergeResult.simulate()) { this.maxInputLength = fieldMergeWith.maxInputLength;
this.maxInputLength = fieldMergeWith.maxInputLength;
}
} }
} }

View File

@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper {
fieldType.setNullValue(nullValue); fieldType.setNullValue(nullValue);
DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll); return (DateFieldMapper) fieldMapper.includeInAll(includeInAll);
return fieldMapper;
} }
@Override @Override

View File

@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
setupFieldType(context); setupFieldType(context);
DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll); return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll);
return fieldMapper;
} }
@Override @Override

View File

@ -81,8 +81,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
setupFieldType(context); setupFieldType(context);
FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll); return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll);
return fieldMapper;
} }
@Override @Override

View File

@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType, IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType,
ignoreMalformed(context), coerce(context), ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
fieldMapper.includeInAll(includeInAll); return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll);
return fieldMapper;
} }
@Override @Override

View File

@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper {
        setupFieldType(context);
        LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType,
            ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
-       fieldMapper.includeInAll(includeInAll);
-       return fieldMapper;
+       return (LongFieldMapper) fieldMapper.includeInAll(includeInAll);
    }
    @Override

View File

@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
    }
    @Override
-   public void includeInAll(Boolean includeInAll) {
+   protected NumberFieldMapper clone() {
+       return (NumberFieldMapper) super.clone();
+   }
+   @Override
+   public Mapper includeInAll(Boolean includeInAll) {
        if (includeInAll != null) {
-           this.includeInAll = includeInAll;
+           NumberFieldMapper clone = clone();
+           clone.includeInAll = includeInAll;
+           return clone;
+       } else {
+           return this;
        }
    }
    @Override
-   public void includeInAllIfNotSet(Boolean includeInAll) {
+   public Mapper includeInAllIfNotSet(Boolean includeInAll) {
        if (includeInAll != null && this.includeInAll == null) {
-           this.includeInAll = includeInAll;
+           NumberFieldMapper clone = clone();
+           clone.includeInAll = includeInAll;
+           return clone;
+       } else {
+           return this;
        }
    }
    @Override
-   public void unsetIncludeInAll() {
-       includeInAll = null;
+   public Mapper unsetIncludeInAll() {
+       if (includeInAll != null) {
+           NumberFieldMapper clone = clone();
+           clone.includeInAll = null;
+           return clone;
+       } else {
+           return this;
+       }
    }
    @Override
@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
-       if (!this.getClass().equals(mergeWith.getClass())) {
-           return;
-       }
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
        NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith;
-       if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
-           this.includeInAll = nfmMergeWith.includeInAll;
-           if (nfmMergeWith.ignoreMalformed.explicit()) {
-               this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
-           }
-           if (nfmMergeWith.coerce.explicit()) {
-               this.coerce = nfmMergeWith.coerce;
-           }
+       this.includeInAll = nfmMergeWith.includeInAll;
+       if (nfmMergeWith.ignoreMalformed.explicit()) {
+           this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
+       }
+       if (nfmMergeWith.coerce.explicit()) {
+           this.coerce = nfmMergeWith.coerce;
        }
    }
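For illustration: the two hunks above turn the in-place setters into copy-on-write methods, where includeInAll(), includeInAllIfNotSet() and unsetIncludeInAll() clone the mapper and return the modified copy instead of mutating a shared instance. The following is a minimal, self-contained sketch of that idea only; SimpleMapper is a hypothetical stand-in, not the real FieldMapper hierarchy, and the field name is made up.

// Illustrative only: a stripped-down copy-on-write setter in the spirit of the
// includeInAll() change above. SimpleMapper is hypothetical, not an Elasticsearch type.
public class SimpleMapper implements Cloneable {
    private Boolean includeInAll;

    @Override
    protected SimpleMapper clone() {
        try {
            return (SimpleMapper) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // cannot happen: the class implements Cloneable
        }
    }

    /** Returns a copy carrying the new setting instead of mutating this instance. */
    public SimpleMapper includeInAll(Boolean includeInAll) {
        if (includeInAll == null) {
            return this;              // nothing to change, share the same instance
        }
        SimpleMapper copy = clone();  // shallow copy is enough for a flat mapper like this
        copy.includeInAll = includeInAll;
        return copy;
    }
}

Returning this when the argument is null keeps the common case allocation-free, which is presumably why the real methods above only clone when there is an actual change to apply.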

View File

@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
        ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType,
            ignoreMalformed(context), coerce(context),
            context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
-       fieldMapper.includeInAll(includeInAll);
-       return fieldMapper;
+       return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll);
    }
    @Override

View File

@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
            StringFieldMapper fieldMapper = new StringFieldMapper(
                name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove,
                context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
-           fieldMapper.includeInAll(includeInAll);
-           return fieldMapper;
+           return fieldMapper.includeInAll(includeInAll);
        }
    }
@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
    }
    @Override
-   public void includeInAll(Boolean includeInAll) {
+   protected StringFieldMapper clone() {
+       return (StringFieldMapper) super.clone();
+   }
+   @Override
+   public StringFieldMapper includeInAll(Boolean includeInAll) {
        if (includeInAll != null) {
-           this.includeInAll = includeInAll;
+           StringFieldMapper clone = clone();
+           clone.includeInAll = includeInAll;
+           return clone;
+       } else {
+           return this;
        }
    }
    @Override
-   public void includeInAllIfNotSet(Boolean includeInAll) {
+   public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
        if (includeInAll != null && this.includeInAll == null) {
-           this.includeInAll = includeInAll;
+           StringFieldMapper clone = clone();
+           clone.includeInAll = includeInAll;
+           return clone;
+       } else {
+           return this;
        }
    }
    @Override
-   public void unsetIncludeInAll() {
-       includeInAll = null;
+   public StringFieldMapper unsetIncludeInAll() {
+       if (includeInAll != null) {
+           StringFieldMapper clone = clone();
+           clone.includeInAll = null;
+           return clone;
+       } else {
+           return this;
+       }
    }
    @Override
@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
-       if (!this.getClass().equals(mergeWith.getClass())) {
-           return;
-       }
-       if (!mergeResult.simulate()) {
-           this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
-           this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
-       }
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
+       this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
+       this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
    }
    @Override

View File

@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
        TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(name, fieldType, defaultFieldType,
            ignoreMalformed(context), coerce(context), context.indexSettings(),
            analyzer, multiFieldsBuilder.build(this, context), copyTo);
-       fieldMapper.includeInAll(includeInAll);
-       return fieldMapper;
+       return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll);
    }
    @Override
@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
-       if (!this.getClass().equals(mergeWith.getClass())) {
-           return;
-       }
-       if (!mergeResult.simulate()) {
-           this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
-       }
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
+       this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
    }
    @Override

View File

@ -61,7 +61,6 @@ public class TypeParsers {
        @Override
        public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-           ContentPath.Type pathType = null;
            FieldMapper.Builder mainFieldBuilder = null;
            List<FieldMapper.Builder> fields = null;
            String firstType = null;
@ -70,10 +69,7 @@ public class TypeParsers {
                Map.Entry<String, Object> entry = iterator.next();
                String fieldName = Strings.toUnderscoreCase(entry.getKey());
                Object fieldNode = entry.getValue();
-               if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                   pathType = parsePathType(name, fieldNode.toString());
-                   iterator.remove();
-               } else if (fieldName.equals("fields")) {
+               if (fieldName.equals("fields")) {
                    Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode;
                    for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) {
                        Map.Entry<String, Object> entry1 = fieldsIterator.next();
@ -132,17 +128,10 @@ public class TypeParsers {
            }
        }
-       if (fields != null && pathType != null) {
-           for (Mapper.Builder field : fields) {
-               mainFieldBuilder.addMultiField(field);
-           }
-           mainFieldBuilder.multiFieldPathType(pathType);
-       } else if (fields != null) {
+       if (fields != null) {
            for (Mapper.Builder field : fields) {
                mainFieldBuilder.addMultiField(field);
            }
-       } else if (pathType != null) {
-           mainFieldBuilder.multiFieldPathType(pathType);
        }
        return mainFieldBuilder;
    }
@ -337,10 +326,7 @@ public class TypeParsers {
    public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
        parserContext = parserContext.createMultiFieldContext(parserContext);
-       if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-           builder.multiFieldPathType(parsePathType(name, propNode.toString()));
-           return true;
-       } else if (propName.equals("fields")) {
+       if (propName.equals("fields")) {
            final Map<String, Object> multiFieldsPropNodes;
@ -457,17 +443,6 @@ public class TypeParsers {
        }
    }
-   public static ContentPath.Type parsePathType(String name, String path) throws MapperParsingException {
-       path = Strings.toUnderscoreCase(path);
-       if ("just_name".equals(path)) {
-           return ContentPath.Type.JUST_NAME;
-       } else if ("full".equals(path)) {
-           return ContentPath.Type.FULL;
-       } else {
-           throw new MapperParsingException("wrong value for pathType [" + path + "] for object [" + name + "]");
-       }
-   }
    @SuppressWarnings("unchecked")
    public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) {
        FieldMapper.CopyTo.Builder copyToBuilder = new FieldMapper.CopyTo.Builder();

View File

@ -33,12 +33,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
@ -74,7 +72,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
    }
    public static class Defaults {
-       public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
        public static final boolean ENABLE_LATLON = false;
        public static final boolean ENABLE_GEOHASH = false;
        public static final boolean ENABLE_GEOHASH_PREFIX = false;
@ -83,7 +80,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
    }
    public abstract static class Builder<T extends Builder, Y extends BaseGeoPointFieldMapper> extends FieldMapper.Builder<T, Y> {
-       protected ContentPath.Type pathType = Defaults.PATH_TYPE;
        protected boolean enableLatLon = Defaults.ENABLE_LATLON;
@ -106,12 +102,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
            return (GeoPointFieldType)fieldType;
        }
-       @Override
-       public T multiFieldPathType(ContentPath.Type pathType) {
-           this.pathType = pathType;
-           return builder;
-       }
        @Override
        public T fieldDataSettings(Settings settings) {
            this.fieldDataSettings = settings;
@ -159,13 +149,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
        }
        public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
-           Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
+           Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
            StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
        public Y build(Mapper.BuilderContext context) {
-           ContentPath.Type origPathType = context.path().pathType();
-           context.path().pathType(pathType);
            GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
            DoubleFieldMapper latMapper = null;
@ -191,9 +178,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
                geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
            }
            context.path().remove();
-           context.path().pathType(origPathType);
-           return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType,
+           return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
                latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
        }
    }
@ -365,17 +351,14 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
    protected final DoubleFieldMapper lonMapper;
-   protected final ContentPath.Type pathType;
    protected final StringFieldMapper geoHashMapper;
    protected Explicit<Boolean> ignoreMalformed;
    protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
-       ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
+       DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
        MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
        super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
-       this.pathType = pathType;
        this.latMapper = latMapper;
        this.lonMapper = lonMapper;
        this.geoHashMapper = geoHashMapper;
@ -388,17 +371,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
-       if (!this.getClass().equals(mergeWith.getClass())) {
-           return;
-       }
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
        BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith;
-       if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
-           if (gpfmMergeWith.ignoreMalformed.explicit()) {
-               this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
-           }
+       if (gpfmMergeWith.ignoreMalformed.explicit()) {
+           this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
        }
    }
@ -441,8 +418,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
    @Override
    public Mapper parse(ParseContext context) throws IOException {
-       ContentPath.Type origPathType = context.path().pathType();
-       context.path().pathType(pathType);
        context.path().add(simpleName());
        GeoPoint sparse = context.parseExternalValue(GeoPoint.class);
@ -487,7 +462,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
        }
        context.path().remove();
-       context.path().pathType(origPathType);
        return null;
    }
@ -512,9 +486,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
    @Override
    protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
        super.doXContentBody(builder, includeDefaults, params);
-       if (includeDefaults || pathType != Defaults.PATH_TYPE) {
-           builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
-       }
        if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
            builder.field("lat_lon", fieldType().isLatLonEnabled());
        }

View File

@ -27,7 +27,6 @@ import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
@ -81,12 +80,12 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
        @Override
        public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
-           MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper,
+           MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
            DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
            CopyTo copyTo) {
            fieldType.setTokenized(false);
            setupFieldType(context);
-           return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper,
+           return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
                geoHashMapper, multiFields, ignoreMalformed, copyTo);
        }
@ -104,9 +103,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
    }
    public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
-       ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
+       DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
        StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
-       super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields,
+       super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
            ignoreMalformed, copyTo);
    }

View File

@ -35,11 +35,9 @@ import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.util.ByteUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
@ -111,14 +109,14 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
        @Override
        public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType,
-           MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper,
+           MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
            DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
            CopyTo copyTo) {
            fieldType.setTokenized(false);
            setupFieldType(context);
            fieldType.setHasDocValues(false);
            defaultFieldType.setHasDocValues(false);
-           return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper,
+           return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
                geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo);
        }
@ -288,32 +286,27 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
    protected Explicit<Boolean> coerce;
    public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
-       ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
+       DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
        StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
        Explicit<Boolean> coerce, CopyTo copyTo) {
-       super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields,
+       super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
            ignoreMalformed, copyTo);
        this.coerce = coerce;
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
-       if (!this.getClass().equals(mergeWith.getClass())) {
-           return;
-       }
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
        GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith;
        if (gpfmMergeWith.coerce.explicit()) {
            if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) {
-               mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
+               throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
            }
        }
-       if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
-           if (gpfmMergeWith.coerce.explicit()) {
-               this.coerce = gpfmMergeWith.coerce;
-           }
+       if (gpfmMergeWith.coerce.explicit()) {
+           this.coerce = gpfmMergeWith.coerce;
        }
    }

View File

@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;
@ -475,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
-       if (!this.getClass().equals(mergeWith.getClass())) {
-           return;
-       }
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
        GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith;
-       if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
-           if (gsfm.coerce.explicit()) {
-               this.coerce = gsfm.coerce;
-           }
+       if (gsfm.coerce.explicit()) {
+           this.coerce = gsfm.coerce;
        }
    }

View File

@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper {
    public interface IncludeInAll {
-       void includeInAll(Boolean includeInAll);
+       /**
+        * If {@code includeInAll} is not null then return a copy of this mapper
+        * that will include values in the _all field according to {@code includeInAll}.
+        */
+       Mapper includeInAll(Boolean includeInAll);
-       void includeInAllIfNotSet(Boolean includeInAll);
+       /**
+        * If {@code includeInAll} is not null and not set on this mapper yet, then
+        * return a copy of this mapper that will include values in the _all field
+        * according to {@code includeInAll}.
+        */
+       Mapper includeInAllIfNotSet(Boolean includeInAll);
-       void unsetIncludeInAll();
+       /**
+        * If {@code includeInAll} was already set on this mapper then return a copy
+        * of this mapper that has {@code includeInAll} not set.
+        */
+       Mapper unsetIncludeInAll();
    }
    public static final String NAME = "_all";
@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
-           mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
+           throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
        }
-       super.merge(mergeWith, mergeResult);
+       super.doMerge(mergeWith, updateAllTypes);
    }
    @Override
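For illustration: the new IncludeInAll javadoc above changes the contract from mutate-in-place to return-a-copy, which is why the builders elsewhere in this commit now return the result of fieldMapper.includeInAll(...) instead of the original mapper. A small self-contained toy, not Elasticsearch code, showing what callers must do under such a contract; ToyMapper and its field are made-up names.

// Illustrative only: under a return-a-copy contract the caller must keep the returned value.
interface IncludeInAllToy {
    IncludeInAllToy includeInAll(Boolean includeInAll);
}

final class ToyMapper implements IncludeInAllToy {
    final Boolean includeInAll;

    ToyMapper(Boolean includeInAll) {
        this.includeInAll = includeInAll;
    }

    @Override
    public ToyMapper includeInAll(Boolean includeInAll) {
        // return an updated copy instead of mutating this instance
        return includeInAll == null ? this : new ToyMapper(includeInAll);
    }
}

class IncludeInAllDemo {
    public static void main(String[] args) {
        ToyMapper original = new ToyMapper(null);
        ToyMapper updated = original.includeInAll(true);
        System.out.println(original.includeInAll); // null  -> the original is unchanged
        System.out.println(updated.includeInAll);  // true  -> the copy carries the setting
    }
}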

View File

@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        // do nothing here, no merging, but also no exception
    }
}

View File

@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
-       if (!mergeResult.simulate()) {
-           if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
-               this.enabledState = indexFieldMapperMergeWith.enabledState;
-           }
+       if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
+           this.enabledState = indexFieldMapperMergeWith.enabledState;
        }
    }

View File

@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
@ -371,11 +370,11 @@ public class ParentFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
-       super.merge(mergeWith, mergeResult);
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+       super.doMerge(mergeWith, updateAllTypes);
        ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
        if (Objects.equals(parentType, fieldMergeWith.parentType) == false) {
-           mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
+           throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
        }
        List<String> conflicts = new ArrayList<>();
@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper {
        parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here
        if (childJoinFieldType != null) {
            // TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type.
-           childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false);
+           childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false);
        }
-       for (String conflict : conflicts) {
-           mergeResult.addConflict(conflict);
+       if (conflicts.isEmpty() == false) {
+           throw new IllegalArgumentException("Merge conflicts: " + conflicts);
        }
-       if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
+       if (active()) {
            childJoinFieldType = fieldMergeWith.childJoinFieldType.clone();
        }
    }

View File

@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        // do nothing here, no merging, but also no exception
    }
}

View File

@ -41,11 +41,11 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
@ -310,18 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
-       if (mergeResult.simulate()) {
-           if (this.enabled != sourceMergeWith.enabled) {
-               mergeResult.addConflict("Cannot update enabled setting for [_source]");
-           }
-           if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
-               mergeResult.addConflict("Cannot update includes setting for [_source]");
-           }
-           if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
-               mergeResult.addConflict("Cannot update excludes setting for [_source]");
-           }
+       List<String> conflicts = new ArrayList<>();
+       if (this.enabled != sourceMergeWith.enabled) {
+           conflicts.add("Cannot update enabled setting for [_source]");
+       }
+       if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
+           conflicts.add("Cannot update includes setting for [_source]");
+       }
+       if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
+           conflicts.add("Cannot update excludes setting for [_source]");
+       }
+       if (conflicts.isEmpty() == false) {
+           throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
        }
    }
}
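For illustration: with MergeResult removed, doMerge() implementations like the one above collect every incompatibility into a list and then fail once with a single IllegalArgumentException. A minimal, self-contained sketch of that collect-then-throw pattern, using made-up setting names rather than the real _source options:

// Illustrative only: gather every incompatibility first, then fail once with all of them.
import java.util.ArrayList;
import java.util.List;

class MergeChecker {
    static void checkMerge(boolean oldEnabled, boolean newEnabled,
                           String oldFormat, String newFormat) {
        List<String> conflicts = new ArrayList<>();
        if (oldEnabled != newEnabled) {
            conflicts.add("Cannot update enabled setting");
        }
        if (!oldFormat.equals(newFormat)) {
            conflicts.add("Cannot update format setting");
        }
        if (conflicts.isEmpty() == false) {
            // one exception carrying every conflict, mirroring the doMerge methods above
            throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
        }
    }
}

Reporting all conflicts at once, rather than stopping at the first one, gives the caller the full picture of an incompatible mapping update in a single failure.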

View File

@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.SourceToParse;
@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
-       if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
-           if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) {
-               mergeResult.addConflict("_ttl cannot be disabled once it was enabled.");
+       if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
+           if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == EnabledAttributeMapper.DISABLED) {
+               throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled.");
            } else {
-               if (!mergeResult.simulate()) {
-                   this.enabledState = ttlMergeWith.enabledState;
-               }
+               this.enabledState = ttlMergeWith.enabledState;
            }
        }
        if (ttlMergeWith.defaultTTL != -1) {
            // we never build the default when the field is disabled so we should also not set it
            // (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster)
-           if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) {
+           if (enabledState == EnabledAttributeMapper.ENABLED) {
                this.defaultTTL = ttlMergeWith.defaultTTL;
            }
        }

View File

@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
-       super.merge(mergeWith, mergeResult);
-       if (!mergeResult.simulate()) {
-           if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
-               this.enabledState = timestampFieldMapperMergeWith.enabledState;
-           }
-       } else {
-           if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
-               return;
-           }
-           if (defaultTimestamp == null) {
-               mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
-           } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
-               mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
-           } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
-               mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
-           }
-           if (this.path != null) {
-               if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
-                   mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
-               }
-           } else if (timestampFieldMapperMergeWith.path() != null) {
-               mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
-           }
+       super.doMerge(mergeWith, updateAllTypes);
+       if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
+           this.enabledState = timestampFieldMapperMergeWith.enabledState;
+       }
+       if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
+           return;
+       }
+       List<String> conflicts = new ArrayList<>();
+       if (defaultTimestamp == null) {
+           conflicts.add("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
+       } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
+           conflicts.add("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
+       } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
+           conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
+       }
+       if (this.path != null) {
+           if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
+               conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
+           }
+       } else if (timestampFieldMapperMergeWith.path() != null) {
+           conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
+       }
+       if (conflicts.isEmpty() == false) {
+           throw new IllegalArgumentException("Conflicts: " + conflicts);
        }
    }
}

View File

@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
@ -225,7 +224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        // do nothing here, no merging, but also no exception
    }
}

View File

@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        // do nothing here, no merging, but also no exception
    }
}

View File

@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
    }
    @Override
-   public void merge(Mapper mergeWith, MergeResult mergeResult) {
+   protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        // nothing to do
    }
}

View File

@ -122,8 +122,7 @@ public class IpFieldMapper extends NumberFieldMapper {
        setupFieldType(context);
        IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
            context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
-       fieldMapper.includeInAll(includeInAll);
-       return fieldMapper;
+       return (IpFieldMapper) fieldMapper.includeInAll(includeInAll);
    }
    @Override

View File

@ -125,13 +125,13 @@ public class DynamicTemplate {
    }
    public boolean match(ContentPath path, String name, String dynamicType) {
-       if (pathMatch != null && !patternMatch(pathMatch, path.fullPathAsText(name))) {
+       if (pathMatch != null && !patternMatch(pathMatch, path.pathAsText(name))) {
            return false;
        }
        if (match != null && !patternMatch(match, name)) {
            return false;
        }
-       if (pathUnmatch != null && patternMatch(pathUnmatch, path.fullPathAsText(name))) {
+       if (pathUnmatch != null && patternMatch(pathUnmatch, path.pathAsText(name))) {
            return false;
        }
        if (unmatch != null && patternMatch(unmatch, name)) {

View File

@ -24,7 +24,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
@ -40,7 +39,6 @@ import java.util.*;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.object;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
/**
 *
@ -54,7 +52,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
        public static final boolean ENABLED = true;
        public static final Nested NESTED = Nested.NO;
        public static final Dynamic DYNAMIC = null; // not set, inherited from root
-       public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
    }
    public static enum Dynamic {
@ -104,8 +101,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
        protected Dynamic dynamic = Defaults.DYNAMIC;
-       protected ContentPath.Type pathType = Defaults.PATH_TYPE;
        protected Boolean includeInAll;
        protected final List<Mapper.Builder> mappersBuilders = new ArrayList<>();
@ -130,11 +125,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
            return builder;
        }
-       public T pathType(ContentPath.Type pathType) {
-           this.pathType = pathType;
-           return builder;
-       }
        public T includeInAll(boolean includeInAll) {
            this.includeInAll = includeInAll;
            return builder;
@@ -147,8 +137,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
@Override @Override
public Y build(BuilderContext context) { public Y build(BuilderContext context) {
ContentPath.Type origPathType = context.path().pathType();
context.path().pathType(pathType);
context.path().add(name); context.path().add(name);
Map<String, Mapper> mappers = new HashMap<>(); Map<String, Mapper> mappers = new HashMap<>();
@@ -156,17 +144,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
Mapper mapper = builder.build(context); Mapper mapper = builder.build(context);
mappers.put(mapper.simpleName(), mapper); mappers.put(mapper.simpleName(), mapper);
} }
context.path().pathType(origPathType);
context.path().remove(); context.path().remove();
ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings()); ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic, mappers, context.indexSettings());
objectMapper.includeInAllIfNotSet(includeInAll); objectMapper = objectMapper.includeInAllIfNotSet(includeInAll);
return (Y) objectMapper; return (Y) objectMapper;
} }
protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, @Nullable Settings settings) { protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
return new ObjectMapper(name, fullPath, enabled, nested, dynamic, pathType, mappers); return new ObjectMapper(name, fullPath, enabled, nested, dynamic, mappers);
} }
} }
@@ -179,7 +166,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
Map.Entry<String, Object> entry = iterator.next(); Map.Entry<String, Object> entry = iterator.next();
String fieldName = Strings.toUnderscoreCase(entry.getKey()); String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue(); Object fieldNode = entry.getValue();
if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder) || parseObjectProperties(name, fieldName, fieldNode, parserContext, builder)) { if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder)) {
iterator.remove(); iterator.remove();
} }
} }
@@ -214,14 +201,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return false; return false;
} }
protected static boolean parseObjectProperties(String name, String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) {
if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
builder.pathType(parsePathType(name, fieldNode.toString()));
return true;
}
return false;
}
protected static void parseNested(String name, Map<String, Object> node, ObjectMapper.Builder builder) { protected static void parseNested(String name, Map<String, Object> node, ObjectMapper.Builder builder) {
boolean nested = false; boolean nested = false;
boolean nestedIncludeInParent = false; boolean nestedIncludeInParent = false;
@@ -326,19 +305,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
private volatile Dynamic dynamic; private volatile Dynamic dynamic;
private final ContentPath.Type pathType;
private Boolean includeInAll; private Boolean includeInAll;
private volatile CopyOnWriteHashMap<String, Mapper> mappers; private volatile CopyOnWriteHashMap<String, Mapper> mappers;
ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) { ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers) {
super(name); super(name);
this.fullPath = fullPath; this.fullPath = fullPath;
this.enabled = enabled; this.enabled = enabled;
this.nested = nested; this.nested = nested;
this.dynamic = dynamic; this.dynamic = dynamic;
this.pathType = pathType;
if (mappers == null) { if (mappers == null) {
this.mappers = new CopyOnWriteHashMap<>(); this.mappers = new CopyOnWriteHashMap<>();
} else { } else {
@@ -380,50 +356,58 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return this.enabled; return this.enabled;
} }
public ContentPath.Type pathType() {
return pathType;
}
public Mapper getMapper(String field) { public Mapper getMapper(String field) {
return mappers.get(field); return mappers.get(field);
} }
@Override @Override
public void includeInAll(Boolean includeInAll) { public ObjectMapper includeInAll(Boolean includeInAll) {
if (includeInAll == null) { if (includeInAll == null) {
return; return this;
} }
this.includeInAll = includeInAll;
ObjectMapper clone = clone();
clone.includeInAll = includeInAll;
// when called from outside, apply this on all the inner mappers // when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) { for (Mapper mapper : clone.mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) { if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll); clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll));
} }
} }
return clone;
} }
@Override @Override
public void includeInAllIfNotSet(Boolean includeInAll) { public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) {
if (this.includeInAll == null) { if (includeInAll == null || this.includeInAll != null) {
this.includeInAll = includeInAll; return this;
} }
ObjectMapper clone = clone();
clone.includeInAll = includeInAll;
// when called from outside, apply this on all the inner mappers // when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) { for (Mapper mapper : clone.mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) { if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll));
} }
} }
return clone;
} }
@Override @Override
public void unsetIncludeInAll() { public ObjectMapper unsetIncludeInAll() {
includeInAll = null; if (includeInAll == null) {
return this;
}
ObjectMapper clone = clone();
clone.includeInAll = null;
// when called from outside, apply this on all the inner mappers // when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) { for (Mapper mapper : mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) { if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll());
} }
} }
return clone;
} }
public Nested nested() { public Nested nested() {
@@ -434,14 +418,9 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return this.nestedTypeFilter; return this.nestedTypeFilter;
} }
/** protected void putMapper(Mapper mapper) {
* Put a new mapper.
* NOTE: this method must be called under the current {@link DocumentMapper}
* lock if concurrent updates are expected.
*/
public void putMapper(Mapper mapper) {
if (mapper instanceof AllFieldMapper.IncludeInAll) { if (mapper instanceof AllFieldMapper.IncludeInAll) {
((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
} }
mappers = mappers.copyAndPut(mapper.simpleName(), mapper); mappers = mappers.copyAndPut(mapper.simpleName(), mapper);
} }
@@ -464,64 +443,43 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
} }
@Override @Override
public void merge(final Mapper mergeWith, final MergeResult mergeResult) { public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
if (!(mergeWith instanceof ObjectMapper)) { if (!(mergeWith instanceof ObjectMapper)) {
mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
return;
} }
ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;
ObjectMapper merged = clone();
if (nested().isNested()) { merged.doMerge(mergeWithObject, updateAllTypes);
if (!mergeWithObject.nested().isNested()) { return merged;
mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested");
return;
}
} else {
if (mergeWithObject.nested().isNested()) {
mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested");
return;
}
}
if (!mergeResult.simulate()) {
if (mergeWithObject.dynamic != null) {
this.dynamic = mergeWithObject.dynamic;
}
}
doMerge(mergeWithObject, mergeResult);
List<Mapper> mappersToPut = new ArrayList<>();
List<ObjectMapper> newObjectMappers = new ArrayList<>();
List<FieldMapper> newFieldMappers = new ArrayList<>();
for (Mapper mapper : mergeWithObject) {
Mapper mergeWithMapper = mapper;
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
if (mergeIntoMapper == null) {
// no mapping, simply add it if not simulating
if (!mergeResult.simulate()) {
mappersToPut.add(mergeWithMapper);
MapperUtils.collect(mergeWithMapper, newObjectMappers, newFieldMappers);
}
} else if (mergeIntoMapper instanceof MetadataFieldMapper == false) {
// root mappers can only exist here for backcompat, and are merged in Mapping
mergeIntoMapper.merge(mergeWithMapper, mergeResult);
}
}
if (!newFieldMappers.isEmpty()) {
mergeResult.addFieldMappers(newFieldMappers);
}
if (!newObjectMappers.isEmpty()) {
mergeResult.addObjectMappers(newObjectMappers);
}
// add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock)
for (Mapper mapper : mappersToPut) {
putMapper(mapper);
}
} }
protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) {
if (nested().isNested()) {
if (!mergeWith.nested().isNested()) {
throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested");
}
} else {
if (mergeWith.nested().isNested()) {
throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from non-nested to nested");
}
}
if (mergeWith.dynamic != null) {
this.dynamic = mergeWith.dynamic;
}
for (Mapper mergeWithMapper : mergeWith) {
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
Mapper merged;
if (mergeIntoMapper == null) {
// no mapping, simply add it
merged = mergeWithMapper;
} else {
// root mappers can only exist here for backcompat, and are merged in Mapping
merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes);
}
putMapper(merged);
}
} }
@Override @Override
@@ -549,9 +507,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
if (enabled != Defaults.ENABLED) { if (enabled != Defaults.ENABLED) {
builder.field("enabled", enabled); builder.field("enabled", enabled);
} }
if (pathType != Defaults.PATH_TYPE) {
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
}
if (includeInAll != null) { if (includeInAll != null) {
builder.field("include_in_all", includeInAll); builder.field("include_in_all", includeInAll);
} }
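Taken together, the includeInAll, includeInAllIfNotSet and unsetIncludeInAll changes in this file move ObjectMapper from in-place mutation to a copy-on-write style: the mapper clones itself, applies the change to the clone and to copies of its children, and returns the clone, while putMapper becomes a protected detail used while building those clones. A rough sketch of that pattern follows, using a plain HashMap and an invented NodeSketch type rather than the real Mapper and CopyOnWriteHashMap classes.

import java.util.HashMap;
import java.util.Map;

// Copy-on-write sketch: "setters" return a modified clone and never change the receiver.
public class NodeSketch implements Cloneable {

    private Boolean includeInAll;
    private Map<String, NodeSketch> children = new HashMap<>();

    @Override
    protected NodeSketch clone() {
        try {
            NodeSketch copy = (NodeSketch) super.clone();
            copy.children = new HashMap<>(children);   // shallow copy of the child map
            return copy;
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e);
        }
    }

    public NodeSketch includeInAll(Boolean value) {
        if (value == null) {
            return this;                               // nothing to change, keep this instance
        }
        NodeSketch copy = clone();
        copy.includeInAll = value;
        for (Map.Entry<String, NodeSketch> child : children.entrySet()) {
            // propagate to copies of the children; the originals stay untouched
            copy.children.put(child.getKey(), child.getValue().includeInAll(value));
        }
        return copy;
    }

    public Boolean includeInAll() {
        return includeInAll;
    }

    public static void main(String[] args) {
        NodeSketch original = new NodeSketch();
        NodeSketch updated = original.includeInAll(true);
        System.out.println(original.includeInAll() + " vs " + updated.includeInAll()); // null vs true
    }
}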

View File

@@ -95,7 +95,7 @@ public class RootObjectMapper extends ObjectMapper {
@Override @Override
protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, @Nullable Settings settings) { protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
assert !nested.isNested(); assert !nested.isNested();
FormatDateTimeFormatter[] dates = null; FormatDateTimeFormatter[] dates = null;
if (dynamicDateTimeFormatters == null) { if (dynamicDateTimeFormatters == null) {
@@ -106,7 +106,7 @@ public class RootObjectMapper extends ObjectMapper {
} else { } else {
dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]); dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
} }
return new RootObjectMapper(name, enabled, dynamic, pathType, mappers, return new RootObjectMapper(name, enabled, dynamic, mappers,
dates, dates,
dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]), dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
dateDetection, numericDetection); dateDetection, numericDetection);
@@ -196,15 +196,23 @@ public class RootObjectMapper extends ObjectMapper {
private volatile DynamicTemplate dynamicTemplates[]; private volatile DynamicTemplate dynamicTemplates[];
RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map<String, Mapper> mappers,
FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) { FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) {
super(name, name, enabled, Nested.NO, dynamic, pathType, mappers); super(name, name, enabled, Nested.NO, dynamic, mappers);
this.dynamicTemplates = dynamicTemplates; this.dynamicTemplates = dynamicTemplates;
this.dynamicDateTimeFormatters = dynamicDateTimeFormatters; this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
this.dateDetection = dateDetection; this.dateDetection = dateDetection;
this.numericDetection = numericDetection; this.numericDetection = numericDetection;
} }
/** Return a copy of this mapper that has the given {@code mapper} as a
* sub mapper. */
public RootObjectMapper copyAndPutMapper(Mapper mapper) {
RootObjectMapper clone = (RootObjectMapper) clone();
clone.putMapper(mapper);
return clone;
}
@Override @Override
public ObjectMapper mappingUpdate(Mapper mapper) { public ObjectMapper mappingUpdate(Mapper mapper) {
RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper); RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper);
@@ -253,25 +261,29 @@ public class RootObjectMapper extends ObjectMapper {
} }
@Override @Override
protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
return (RootObjectMapper) super.merge(mergeWith, updateAllTypes);
}
@Override
protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
if (!mergeResult.simulate()) { // merge them
// merge them List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { boolean replaced = false;
boolean replaced = false; for (int i = 0; i < mergedTemplates.size(); i++) {
for (int i = 0; i < mergedTemplates.size(); i++) { if (mergedTemplates.get(i).name().equals(template.name())) {
if (mergedTemplates.get(i).name().equals(template.name())) { mergedTemplates.set(i, template);
mergedTemplates.set(i, template); replaced = true;
replaced = true;
}
}
if (!replaced) {
mergedTemplates.add(template);
} }
} }
this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); if (!replaced) {
mergedTemplates.add(template);
}
} }
this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
} }
@Override @Override
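The rewritten doMerge keeps the existing rule for dynamic templates, namely that an incoming template with the same name replaces the old one in place and anything else is appended; the merge simply always applies now that there is no simulate pass. A small sketch of that replace-or-append behaviour, using plain strings instead of DynamicTemplate to stay self-contained; a LinkedHashMap keyed by template name produces the same ordering as the list scan in the diff.

import java.util.LinkedHashMap;
import java.util.Map;

public class MergeByNameSketch {

    // Incoming entries replace existing ones that share a name (keeping their position)
    // and are appended otherwise, mirroring the list scan in RootObjectMapper.doMerge.
    static Map<String, String> mergeByName(Map<String, String> existing, Map<String, String> incoming) {
        Map<String, String> merged = new LinkedHashMap<>(existing);
        merged.putAll(incoming);
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> current = new LinkedHashMap<>();
        current.put("strings", "v1");
        current.put("dates", "v1");
        Map<String, String> incoming = new LinkedHashMap<>();
        incoming.put("dates", "v2");
        incoming.put("geo", "v1");
        System.out.println(mergeByName(current, incoming)); // {strings=v1, dates=v2, geo=v1}
    }
}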

View File

@@ -19,13 +19,11 @@
package org.elasticsearch.index.query.functionscore; package org.elasticsearch.index.query.functionscore;
import java.util.Map;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser; import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser; import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser;
import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser; import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
@@ -74,11 +72,12 @@ public class ScoreFunctionParserMapper {
return functionParsers.get(parserName); return functionParsers.get(parserName);
} }
private static void addParser(ScoreFunctionParser<?> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) { private static void addParser(ScoreFunctionParser<? extends ScoreFunctionBuilder> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) {
for (String name : scoreFunctionParser.getNames()) { for (String name : scoreFunctionParser.getNames()) {
map.put(name, scoreFunctionParser); map.put(name, scoreFunctionParser);
} }
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, scoreFunctionParser.getBuilderPrototype()); @SuppressWarnings("unchecked") NamedWriteable<? extends ScoreFunctionBuilder> sfb = scoreFunctionParser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb);
} }
} }

View File

@@ -110,7 +110,7 @@ public class TranslogRecoveryPerformer {
if (currentUpdate == null) { if (currentUpdate == null) {
recoveredTypes.put(type, update); recoveredTypes.put(type, update);
} else { } else {
MapperUtils.merge(currentUpdate, update); currentUpdate = currentUpdate.merge(update, false);
} }
} }
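Because mapping updates are immutable in this change, the recovery code above no longer merges into the stored update; it replaces the map entry with the result of currentUpdate.merge(update, false). A hedged sketch of that accumulate-or-replace bookkeeping, with an invented Update type and the updateAllTypes flag left out:

import java.util.HashMap;
import java.util.Map;

public class RecoveredUpdatesSketch {

    // Stand-in for an immutable mapping update: merging two updates yields a new object.
    static final class Update {
        final String content;
        Update(String content) { this.content = content; }
        Update merge(Update other) { return new Update(content + "+" + other.content); }
    }

    private final Map<String, Update> recoveredTypes = new HashMap<>();

    // The first update for a type is stored as-is; later ones replace the entry with the merged value.
    void accumulate(String type, Update update) {
        recoveredTypes.merge(type, update, Update::merge);
    }

    public static void main(String[] args) {
        RecoveredUpdatesSketch sketch = new RecoveredUpdatesSketch();
        sketch.accumulate("doc", new Update("field-a"));
        sketch.accumulate("doc", new Update("field-b"));
        System.out.println(sketch.recoveredTypes.get("doc").content); // field-a+field-b
    }
}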

View File

@@ -158,7 +158,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try { try {
if (translogGeneration != null) { if (translogGeneration != null) {
final Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); final Checkpoint checkpoint = readCheckpoint();
this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint); this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint);
if (recoveredTranslogs.isEmpty()) { if (recoveredTranslogs.isEmpty()) {
throw new IllegalStateException("at least one reader must be recovered"); throw new IllegalStateException("at least one reader must be recovered");
@@ -421,13 +421,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return location; return location;
} }
} catch (AlreadyClosedException | IOException ex) { } catch (AlreadyClosedException | IOException ex) {
if (current.getTragicException() != null) { closeOnTragicEvent(ex);
try {
close();
} catch (Exception inner) {
ex.addSuppressed(inner);
}
}
throw ex; throw ex;
} catch (Throwable e) { } catch (Throwable e) {
throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);
@@ -507,13 +501,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
current.sync(); current.sync();
} }
} catch (AlreadyClosedException | IOException ex) { } catch (AlreadyClosedException | IOException ex) {
if (current.getTragicException() != null) { closeOnTragicEvent(ex);
try {
close();
} catch (Exception inner) {
ex.addSuppressed(inner);
}
}
throw ex; throw ex;
} }
} }
@@ -545,10 +533,23 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
ensureOpen(); ensureOpen();
return current.syncUpTo(location.translogLocation + location.size); return current.syncUpTo(location.translogLocation + location.size);
} }
} catch (AlreadyClosedException | IOException ex) {
closeOnTragicEvent(ex);
throw ex;
} }
return false; return false;
} }
private void closeOnTragicEvent(Throwable ex) {
if (current.getTragicException() != null) {
try {
close();
} catch (Exception inner) {
ex.addSuppressed(inner);
}
}
}
/** /**
* return stats * return stats
*/ */
@@ -1433,4 +1434,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return current.getTragicException(); return current.getTragicException();
} }
/** Reads and returns the current checkpoint */
final Checkpoint readCheckpoint() throws IOException {
return Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME));
}
} }
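The repeated catch blocks are factored into closeOnTragicEvent: when an AlreadyClosedException or IOException surfaces and the current writer has recorded a tragic failure, the translog closes itself, any secondary failure from close() is attached as a suppressed exception, and the original exception is rethrown. A self-contained sketch of that pattern; the Resource type and its failure flag are stand-ins, not the real translog writer.

import java.io.Closeable;
import java.io.IOException;

public class TragicCloseSketch {

    // Stand-in for a writer that can fail unrecoverably and must not be used afterwards.
    static class Resource implements Closeable {
        volatile Throwable tragicException;

        void write(byte[] data) throws IOException {
            IOException failure = new IOException("disk full");
            tragicException = failure;       // simulate and remember an unrecoverable failure
            throw failure;
        }

        @Override
        public void close() throws IOException {
            // release files, drop buffers, ...
        }
    }

    private final Resource current = new Resource();

    public void add(byte[] data) throws IOException {
        try {
            current.write(data);
        } catch (IOException ex) {
            closeOnTragicEvent(ex);          // close first so later callers cannot use a broken resource
            throw ex;                        // then rethrow the original failure
        }
    }

    private void closeOnTragicEvent(Throwable ex) {
        if (current.tragicException != null) {
            try {
                current.close();
            } catch (Exception inner) {
                ex.addSuppressed(inner);     // keep the close failure visible on the original exception
            }
        }
    }

    public static void main(String[] args) {
        try {
            new TragicCloseSketch().add(new byte[]{1, 2, 3});
        } catch (IOException e) {
            System.out.println("operation failed: " + e.getMessage());
        }
    }
}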

View File

@@ -19,8 +19,12 @@
package org.elasticsearch.indices.flush; package org.elasticsearch.indices.flush;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
@@ -30,15 +34,15 @@ import static java.util.Collections.unmodifiableMap;
/** /**
* Result for all copies of a shard * Result for all copies of a shard
*/ */
public class ShardsSyncedFlushResult { public class ShardsSyncedFlushResult implements Streamable {
private String failureReason; private String failureReason;
private Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses; private Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses;
private String syncId; private String syncId;
private ShardId shardId; private ShardId shardId;
// some shards may be unassigned, so we need this as state // some shards may be unassigned, so we need this as state
private int totalShards; private int totalShards;
public ShardsSyncedFlushResult() { private ShardsSyncedFlushResult() {
} }
public ShardId getShardId() { public ShardId getShardId() {
@@ -59,7 +63,7 @@ public class ShardsSyncedFlushResult {
/** /**
* success constructor * success constructor
*/ */
public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses) { public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses) {
this.failureReason = null; this.failureReason = null;
this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses)); this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses));
this.syncId = syncId; this.syncId = syncId;
@@ -98,7 +102,7 @@ public class ShardsSyncedFlushResult {
*/ */
public int successfulShards() { public int successfulShards() {
int i = 0; int i = 0;
for (SyncedFlushService.SyncedFlushResponse result : shardResponses.values()) { for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) {
if (result.success()) { if (result.success()) {
i++; i++;
} }
@@ -109,9 +113,9 @@ public class ShardsSyncedFlushResult {
/** /**
* @return an array of shard failures * @return an array of shard failures
*/ */
public Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards() { public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards() {
Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failures = new HashMap<>(); Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failures = new HashMap<>();
for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> result : shardResponses.entrySet()) { for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> result : shardResponses.entrySet()) {
if (result.getValue().success() == false) { if (result.getValue().success() == false) {
failures.put(result.getKey(), result.getValue()); failures.put(result.getKey(), result.getValue());
} }
@@ -123,11 +127,45 @@ public class ShardsSyncedFlushResult {
* @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush.
* Empty if synced flush failed before step three. * Empty if synced flush failed before step three.
*/ */
public Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses() { public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses() {
return shardResponses; return shardResponses;
} }
public ShardId shardId() { public ShardId shardId() {
return shardId; return shardId;
} }
@Override
public void readFrom(StreamInput in) throws IOException {
failureReason = in.readOptionalString();
int numResponses = in.readInt();
shardResponses = new HashMap<>();
for (int i = 0; i < numResponses; i++) {
ShardRouting shardRouting = ShardRouting.readShardRoutingEntry(in);
SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in);
shardResponses.put(shardRouting, response);
}
syncId = in.readOptionalString();
shardId = ShardId.readShardId(in);
totalShards = in.readInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(failureReason);
out.writeInt(shardResponses.size());
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> entry : shardResponses.entrySet()) {
entry.getKey().writeTo(out);
entry.getValue().writeTo(out);
}
out.writeOptionalString(syncId);
shardId.writeTo(out);
out.writeInt(totalShards);
}
public static ShardsSyncedFlushResult readShardsSyncedFlushResult(StreamInput in) throws IOException {
ShardsSyncedFlushResult shardsSyncedFlushResult = new ShardsSyncedFlushResult();
shardsSyncedFlushResult.readFrom(in);
return shardsSyncedFlushResult;
}
} }
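The new readFrom and writeTo only work if both sides agree on the field order: optional failure reason, entry count, one routing/response pair per entry, optional sync id, shard id, total shard count. The sketch below shows the same wire-symmetry idea with plain java.io streams and a String map; StreamInput, StreamOutput and ShardRouting are replaced by simpler stand-ins so the example runs on its own.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class WireSymmetrySketch {

    // Write a nullable string as a presence flag followed by the value.
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    // Writer: optional failure reason, then size-prefixed key/value entries.
    static byte[] write(String failureReason, Map<String, String> responses) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            writeOptionalString(out, failureReason);
            out.writeInt(responses.size());
            for (Map.Entry<String, String> entry : responses.entrySet()) {
                out.writeUTF(entry.getKey());
                out.writeUTF(entry.getValue());
            }
        }
        return bytes.toByteArray();
    }

    // Reader: consumes exactly the fields the writer produced, in the same order.
    static Map<String, String> read(byte[] data) throws IOException {
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(data))) {
            String failureReason = readOptionalString(in);   // read even though only the map is returned
            int numResponses = in.readInt();
            Map<String, String> responses = new HashMap<>();
            for (int i = 0; i < numResponses; i++) {
                responses.put(in.readUTF(), in.readUTF());
            }
            return responses;
        }
    }

    public static void main(String[] args) throws IOException {
        Map<String, String> responses = new HashMap<>();
        responses.put("shard-0-copy-1", "success");
        responses.put("shard-0-copy-2", "pending operations");
        System.out.println(read(write(null, responses)));
    }
}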

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.indices.flush;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
@@ -81,9 +82,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
this.clusterService = clusterService; this.clusterService = clusterService;
this.transportService = transportService; this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver; this.indexNameExpressionResolver = indexNameExpressionResolver;
transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler());
transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler());
transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, SyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler());
transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler()); transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler());
} }
@@ -109,7 +109,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
* a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)} * a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)}
* for more details. * for more details.
*/ */
public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<IndicesSyncedFlushResult> listener) { public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) {
final ClusterState state = clusterService.state(); final ClusterState state = clusterService.state();
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap(); final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
@@ -123,7 +123,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
} }
if (numberOfShards == 0) { if (numberOfShards == 0) {
listener.onResponse(new IndicesSyncedFlushResult(results)); listener.onResponse(new SyncedFlushResponse(results));
return; return;
} }
final int finalTotalNumberOfShards = totalNumberOfShards; final int finalTotalNumberOfShards = totalNumberOfShards;
@@ -138,7 +138,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
results.get(index).add(syncedFlushResult); results.get(index).add(syncedFlushResult);
if (countDown.countDown()) { if (countDown.countDown()) {
listener.onResponse(new IndicesSyncedFlushResult(results)); listener.onResponse(new SyncedFlushResponse(results));
} }
} }
@@ -147,7 +147,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
logger.debug("{} unexpected error while executing synced flush", shardId); logger.debug("{} unexpected error while executing synced flush", shardId);
results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage())); results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage()));
if (countDown.countDown()) { if (countDown.countDown()) {
listener.onResponse(new IndicesSyncedFlushResult(results)); listener.onResponse(new SyncedFlushResponse(results));
} }
} }
}); });
@@ -297,33 +297,33 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds, void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds,
final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) { final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) {
final CountDown countDown = new CountDown(shards.size()); final CountDown countDown = new CountDown(shards.size());
final Map<ShardRouting, SyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap(); final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
for (final ShardRouting shard : shards) { for (final ShardRouting shard : shards) {
final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
if (node == null) { if (node == null) {
logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
results.put(shard, new SyncedFlushResponse("unknown node")); results.put(shard, new ShardSyncedFlushResponse("unknown node"));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue; continue;
} }
final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
if (expectedCommitId == null) { if (expectedCommitId == null) {
logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
results.put(shard, new SyncedFlushResponse("no commit id from pre-sync flush")); results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue; continue;
} }
logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId); logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId),
new BaseTransportResponseHandler<SyncedFlushResponse>() { new BaseTransportResponseHandler<ShardSyncedFlushResponse>() {
@Override @Override
public SyncedFlushResponse newInstance() { public ShardSyncedFlushResponse newInstance() {
return new SyncedFlushResponse(); return new ShardSyncedFlushResponse();
} }
@Override @Override
public void handleResponse(SyncedFlushResponse response) { public void handleResponse(ShardSyncedFlushResponse response) {
SyncedFlushResponse existing = results.put(shard, response); ShardSyncedFlushResponse existing = results.put(shard, response);
assert existing == null : "got two answers for node [" + node + "]"; assert existing == null : "got two answers for node [" + node + "]";
// count after the assert so we won't decrement twice in handleException // count after the assert so we won't decrement twice in handleException
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
@@ -332,7 +332,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override @Override
public void handleException(TransportException exp) { public void handleException(TransportException exp) {
logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard); logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard);
results.put(shard, new SyncedFlushResponse(exp.getMessage())); results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
} }
@@ -346,7 +346,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
} }
private void contDownAndSendResponseIfDone(String syncId, List<ShardRouting> shards, ShardId shardId, int totalShards, private void contDownAndSendResponseIfDone(String syncId, List<ShardRouting> shards, ShardId shardId, int totalShards,
ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, SyncedFlushResponse> results) { ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, ShardSyncedFlushResponse> results) {
if (countDown.countDown()) { if (countDown.countDown()) {
assert results.size() == shards.size(); assert results.size() == shards.size();
listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results));
@@ -369,7 +369,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
} }
continue; continue;
} }
transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() { transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() {
@Override @Override
public PreSyncedFlushResponse newInstance() { public PreSyncedFlushResponse newInstance() {
return new PreSyncedFlushResponse(); return new PreSyncedFlushResponse();
@@ -401,7 +401,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
} }
} }
private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) { private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
logger.trace("{} performing pre sync flush", request.shardId()); logger.trace("{} performing pre sync flush", request.shardId());
@@ -410,7 +410,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
return new PreSyncedFlushResponse(commitId); return new PreSyncedFlushResponse(commitId);
} }
private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) { private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id()); IndexShard indexShard = indexService.getShard(request.shardId().id());
logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId()); logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId());
@@ -418,11 +418,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
switch (result) { switch (result) {
case SUCCESS: case SUCCESS:
return new SyncedFlushResponse(); return new ShardSyncedFlushResponse();
case COMMIT_MISMATCH: case COMMIT_MISMATCH:
return new SyncedFlushResponse("commit has changed"); return new ShardSyncedFlushResponse("commit has changed");
case PENDING_OPERATIONS: case PENDING_OPERATIONS:
return new SyncedFlushResponse("pending operations"); return new ShardSyncedFlushResponse("pending operations");
default: default:
throw new ElasticsearchException("unknown synced flush result [" + result + "]"); throw new ElasticsearchException("unknown synced flush result [" + result + "]");
} }
@@ -439,19 +439,19 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
return new InFlightOpsResponse(opCount); return new InFlightOpsResponse(opCount);
} }
public final static class PreSyncedFlushRequest extends TransportRequest { public final static class PreShardSyncedFlushRequest extends TransportRequest {
private ShardId shardId; private ShardId shardId;
public PreSyncedFlushRequest() { public PreShardSyncedFlushRequest() {
} }
public PreSyncedFlushRequest(ShardId shardId) { public PreShardSyncedFlushRequest(ShardId shardId) {
this.shardId = shardId; this.shardId = shardId;
} }
@Override @Override
public String toString() { public String toString() {
return "PreSyncedFlushRequest{" + return "PreShardSyncedFlushRequest{" +
"shardId=" + shardId + "shardId=" + shardId +
'}'; '}';
} }
@@ -504,16 +504,16 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
} }
} }
public static final class SyncedFlushRequest extends TransportRequest { public static final class ShardSyncedFlushRequest extends TransportRequest {
private String syncId; private String syncId;
private Engine.CommitId expectedCommitId; private Engine.CommitId expectedCommitId;
private ShardId shardId; private ShardId shardId;
public SyncedFlushRequest() { public ShardSyncedFlushRequest() {
} }
public SyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) {
this.expectedCommitId = expectedCommitId; this.expectedCommitId = expectedCommitId;
this.shardId = shardId; this.shardId = shardId;
this.syncId = syncId; this.syncId = syncId;
@@ -549,7 +549,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override @Override
public String toString() { public String toString() {
return "SyncedFlushRequest{" + return "ShardSyncedFlushRequest{" +
"shardId=" + shardId + "shardId=" + shardId +
",syncId='" + syncId + '\'' + ",syncId='" + syncId + '\'' +
'}'; '}';
@@ -559,18 +559,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
/** /**
* Response for third step of synced flush (writing the sync id) for one shard copy * Response for third step of synced flush (writing the sync id) for one shard copy
*/ */
public static final class SyncedFlushResponse extends TransportResponse { public static final class ShardSyncedFlushResponse extends TransportResponse {
/** /**
* a non null value indicates a failure to sync flush. null means success * a non null value indicates a failure to sync flush. null means success
*/ */
String failureReason; String failureReason;
public SyncedFlushResponse() { public ShardSyncedFlushResponse() {
failureReason = null; failureReason = null;
} }
public SyncedFlushResponse(String failureReason) { public ShardSyncedFlushResponse(String failureReason) {
this.failureReason = failureReason; this.failureReason = failureReason;
} }
@@ -596,11 +596,17 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override @Override
public String toString() { public String toString() {
return "SyncedFlushResponse{" + return "ShardSyncedFlushResponse{" +
"success=" + success() + "success=" + success() +
", failureReason='" + failureReason + '\'' + ", failureReason='" + failureReason + '\'' +
'}'; '}';
} }
public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException {
ShardSyncedFlushResponse shardSyncedFlushResponse = new ShardSyncedFlushResponse();
shardSyncedFlushResponse.readFrom(in);
return shardSyncedFlushResponse;
}
} }
@@ -677,18 +683,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
} }
} }
private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreSyncedFlushRequest> { private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> {
@Override @Override
public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception { public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(performPreSyncedFlush(request)); channel.sendResponse(performPreSyncedFlush(request));
} }
} }
private final class SyncedFlushTransportHandler implements TransportRequestHandler<SyncedFlushRequest> { private final class SyncedFlushTransportHandler implements TransportRequestHandler<ShardSyncedFlushRequest> {
@Override @Override
public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception { public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(performSyncedFlush(request)); channel.sendResponse(performSyncedFlush(request));
} }
} }
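sendSyncRequests relies on every shard copy contributing exactly one entry to the results map and one countDown() call, whether the copy answers, fails, or is skipped before a request is even sent; whichever path performs the final decrement delivers the aggregated result to the listener. A reduced sketch of that fan-out/fan-in shape using AtomicInteger and a callback; the threading and all names here are simplifications, not the transport-layer code.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class FanInSketch {

    // Fire one simulated request per shard copy; the last reply invokes the listener exactly once.
    static void flushAll(List<String> shardCopies, Consumer<Map<String, String>> listener) {
        Map<String, String> results = new ConcurrentHashMap<>();
        if (shardCopies.isEmpty()) {
            listener.accept(results);            // mirrors the "no shards" shortcut in the diff
            return;
        }
        AtomicInteger remaining = new AtomicInteger(shardCopies.size());
        for (String copy : shardCopies) {
            new Thread(() -> {
                // every path records a result, even when the copy is skipped
                results.put(copy, copy.endsWith("-unassigned") ? "skipped: unknown node" : "success");
                if (remaining.decrementAndGet() == 0) {
                    listener.accept(results);    // only the final decrement reaches zero
                }
            }).start();
        }
    }

    public static void main(String[] args) {
        flushAll(List.of("copy-1", "copy-2", "copy-3-unassigned"),
                results -> System.out.println("synced flush result: " + results));
    }
}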

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.indices.query;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.EmptyQueryBuilder; import org.elasticsearch.index.query.EmptyQueryBuilder;
@@ -40,11 +41,12 @@ public class IndicesQueriesRegistry extends AbstractComponent {
public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) { public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) {
super(settings); super(settings);
Map<String, QueryParser<?>> queryParsers = new HashMap<>(); Map<String, QueryParser<?>> queryParsers = new HashMap<>();
for (QueryParser<?> queryParser : injectedQueryParsers) { for (@SuppressWarnings("unchecked") QueryParser<? extends QueryBuilder> queryParser : injectedQueryParsers) {
for (String name : queryParser.names()) { for (String name : queryParser.names()) {
queryParsers.put(name, queryParser); queryParsers.put(name, queryParser);
} }
namedWriteableRegistry.registerPrototype(QueryBuilder.class, queryParser.getBuilderPrototype()); @SuppressWarnings("unchecked") NamedWriteable<? extends QueryBuilder> qb = queryParser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb);
} }
// EmptyQueryBuilder is not registered as query parser but used internally. // EmptyQueryBuilder is not registered as query parser but used internally.
// We need to register it with the NamedWriteableRegistry in order to serialize it // We need to register it with the NamedWriteableRegistry in order to serialize it
@@ -58,4 +60,4 @@ public class IndicesQueriesRegistry extends AbstractComponent {
public Map<String, QueryParser<?>> queryParsers() { public Map<String, QueryParser<?>> queryParsers() {
return queryParsers; return queryParsers;
} }
} }
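The registry above registers each injected parser under every name it answers to and then registers the parser's builder prototype, capturing the wildcard-typed prototype in a local variable so the @SuppressWarnings annotation stays on a single assignment instead of the whole loop. A bare-bones sketch of that name-to-parser registry follows; the Builder and Parser interfaces are invented for illustration and carry none of the Writeable machinery.

import java.util.HashMap;
import java.util.Map;

public class PrototypeRegistrySketch {

    // Invented stand-ins: a parser answers to several names and exposes a prototype builder.
    interface Builder {
        String writeableName();
    }

    interface Parser<B extends Builder> {
        String[] names();
        B builderPrototype();
    }

    private final Map<String, Parser<?>> parsers = new HashMap<>();
    private final Map<String, Builder> prototypes = new HashMap<>();

    void register(Parser<? extends Builder> parser) {
        for (String name : parser.names()) {
            parsers.put(name, parser);               // one parser may claim several names
        }
        // Capture the prototype in a local variable before handing it to the prototype registry;
        // in the diff this assignment is where the narrowly scoped @SuppressWarnings lives.
        Builder prototype = parser.builderPrototype();
        prototypes.put(prototype.writeableName(), prototype);
    }

    Parser<?> parser(String name) {
        return parsers.get(name);
    }
}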

View File

@@ -0,0 +1,34 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.monitor.os;
public class DummyOsInfo extends OsInfo {
DummyOsInfo() {
refreshInterval = 0;
availableProcessors = 0;
allocatedProcessors = 0;
name = "dummy_name";
arch = "dummy_arch";
version = "dummy_version";
}
public static final DummyOsInfo INSTANCE = new DummyOsInfo();
}

View File

@@ -108,6 +108,9 @@ public class OsInfo implements Streamable, ToXContent {
refreshInterval = in.readLong(); refreshInterval = in.readLong();
availableProcessors = in.readInt(); availableProcessors = in.readInt();
allocatedProcessors = in.readInt(); allocatedProcessors = in.readInt();
name = in.readOptionalString();
arch = in.readOptionalString();
version = in.readOptionalString();
} }
@Override @Override
@@ -115,5 +118,8 @@ public class OsInfo implements Streamable, ToXContent {
out.writeLong(refreshInterval); out.writeLong(refreshInterval);
out.writeInt(availableProcessors); out.writeInt(availableProcessors);
out.writeInt(allocatedProcessors); out.writeInt(allocatedProcessors);
out.writeOptionalString(name);
out.writeOptionalString(arch);
out.writeOptionalString(version);
} }
} }

View File

@@ -0,0 +1,28 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
public class DummyPluginInfo extends PluginInfo {
private DummyPluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) {
super(name, description, site, version, jvm, classname, isolated);
}
public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", true, "dummy_plugin_version", true, "DummyPluginName", true);
}

View File

@@ -90,6 +90,7 @@ public class PluginManager {
"mapper-murmur3", "mapper-murmur3",
"mapper-size", "mapper-size",
"repository-azure", "repository-azure",
"repository-hdfs",
"repository-s3", "repository-s3",
"store-smb")); "store-smb"));

View File

@@ -19,14 +19,14 @@
package org.elasticsearch.rest.action.admin.indices.flush; package org.elasticsearch.rest.action.admin.indices.flush;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.flush.IndicesSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.*; import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.rest.action.support.RestBuilderListener;
@@ -38,12 +38,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
*/ */
public class RestSyncedFlushAction extends BaseRestHandler { public class RestSyncedFlushAction extends BaseRestHandler {
private final SyncedFlushService syncedFlushService;
@Inject @Inject
public RestSyncedFlushAction(Settings settings, RestController controller, Client client, SyncedFlushService syncedFlushService) { public RestSyncedFlushAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client); super(settings, controller, client);
this.syncedFlushService = syncedFlushService;
controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, "/_flush/synced", this);
controller.registerHandler(POST, "/{index}/_flush/synced", this); controller.registerHandler(POST, "/{index}/_flush/synced", this);
@@ -53,12 +50,12 @@ public class RestSyncedFlushAction extends BaseRestHandler {
@Override @Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen());
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
syncedFlushService.attemptSyncedFlush(indices, indicesOptions, new RestBuilderListener<IndicesSyncedFlushResult>(channel) { syncedFlushRequest.indicesOptions(indicesOptions);
client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener<SyncedFlushResponse>(channel) {
@Override @Override
public RestResponse buildResponse(IndicesSyncedFlushResult results, XContentBuilder builder) throws Exception { public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception {
builder.startObject(); builder.startObject();
results.toXContent(builder, request); results.toXContent(builder, request);
builder.endObject(); builder.endObject();

View File

@@ -84,12 +84,14 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField; import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.*; import org.elasticsearch.search.internal.*;
import org.elasticsearch.search.internal.SearchContext.Lifetime; import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.query.*; import org.elasticsearch.search.query.*;
import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
@@ -651,7 +653,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         }
     }
-    private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException {
+    private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchContextException {
         // nothing to parse...
         if (source == null) {
             return;
@@ -802,19 +804,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
             fieldDataFieldsContext.setHitExecutionNeeded(true);
         }
         if (source.highlighter() != null) {
-            XContentParser highlighterParser = null;
+            HighlightBuilder highlightBuilder = source.highlighter();
             try {
-                highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter());
-                this.elementParsers.get("highlight").parse(highlighterParser, context);
-            } catch (Exception e) {
-                String sSource = "_na_";
-                try {
-                    sSource = source.toString();
-                } catch (Throwable e1) {
-                    // ignore
-                }
-                XContentLocation location = highlighterParser != null ? highlighterParser.getTokenLocation() : null;
-                throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e);
+                context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext()));
+            } catch (IOException e) {
+                throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
             }
         }
         if (source.innerHits() != null) {
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.search.aggregations.bucket.children;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.search.*;
@@ -64,9 +65,6 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
     private final LongObjectPagedHashMap<long[]> parentOrdToOtherBuckets;
     private boolean multipleBucketsPerParentOrd = false;
-    // This needs to be a Set to avoid duplicate reader context entries via (#setNextReader(...), it can get invoked multiple times with the same reader context)
-    private Set<LeafReaderContext> replay = new LinkedHashSet<>();
     public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext,
             Aggregator parent, String parentType, Query childFilter, Query parentFilter,
             ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource,
@@ -99,17 +97,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
         if (valuesSource == null) {
             return LeafBucketCollector.NO_OP_COLLECTOR;
         }
-        if (replay == null) {
-            throw new IllegalStateException();
-        }
         final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
         assert globalOrdinals != null;
         Scorer parentScorer = parentFilter.scorer(ctx);
         final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer);
-        if (childFilter.scorer(ctx) != null) {
-            replay.add(ctx);
-        }
         return new LeafBucketCollector() {
             @Override
@@ -138,10 +130,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
     @Override
     protected void doPostCollection() throws IOException {
-        final Set<LeafReaderContext> replay = this.replay;
-        this.replay = null;
-        for (LeafReaderContext ctx : replay) {
+        IndexReader indexReader = context().searchContext().searcher().getIndexReader();
+        for (LeafReaderContext ctx : indexReader.leaves()) {
             DocIdSetIterator childDocsIter = childFilter.scorer(ctx);
             if (childDocsIter == null) {
                 continue;
@@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms<LongTerms, LongTerms.Bucket> {
     @Override
     public String getKeyAsString() {
-        return String.valueOf(term);
+        return formatter.format(term);
     }
     @Override
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.metrics.tophits;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
@@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder {
         return sourceBuilder;
     }
-    public BytesReference highlighter() {
+    public HighlightBuilder highlighter() {
         return sourceBuilder().highlighter();
     }
@@ -144,7 +144,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
     private List<BytesReference> aggregations;
-    private BytesReference highlightBuilder;
+    private HighlightBuilder highlightBuilder;
     private BytesReference suggestBuilder;
@@ -405,22 +405,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
      * Adds highlight to perform as part of the search.
      */
     public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) {
-        try {
-            XContentBuilder builder = XContentFactory.jsonBuilder();
-            builder.startObject();
-            highlightBuilder.innerXContent(builder);
-            builder.endObject();
-            this.highlightBuilder = builder.bytes();
-            return this;
-        } catch (IOException e) {
-            throw new RuntimeException(e);
-        }
+        this.highlightBuilder = highlightBuilder;
+        return this;
     }
     /**
-     * Gets the bytes representing the hightlighter builder for this request.
+     * Gets the hightlighter builder for this request.
      */
-    public BytesReference highlighter() {
+    public HighlightBuilder highlighter() {
         return highlightBuilder;
     }
@@ -813,8 +805,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                 }
                 builder.aggregations = aggregations;
             } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
-                XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
-                builder.highlightBuilder = xContentBuilder.bytes();
+                builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
             } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
                 XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
                 builder.innerHitsBuilder = xContentBuilder.bytes();
@@ -1012,10 +1003,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         }
         if (highlightBuilder != null) {
-            builder.field(HIGHLIGHT_FIELD.getPreferredName());
-            XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder);
-            parser.nextToken();
-            builder.copyCurrentStructure(parser);
+            this.highlightBuilder.toXContent(builder, params);
         }
         if (innerHitsBuilder != null) {
@@ -1158,7 +1146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         }
         builder.from = in.readVInt();
         if (in.readBoolean()) {
-            builder.highlightBuilder = in.readBytesReference();
+            builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in);
         }
         boolean hasIndexBoost = in.readBoolean();
         if (hasIndexBoost) {
@@ -1259,7 +1247,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         boolean hasHighlightBuilder = highlightBuilder != null;
         out.writeBoolean(hasHighlightBuilder);
         if (hasHighlightBuilder) {
-            out.writeBytesReference(highlightBuilder);
+            highlightBuilder.writeTo(out);
         }
         boolean hasIndexBoost = indexBoost != null;
         out.writeBoolean(hasIndexBoost);
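Because SearchSourceBuilder now keeps the highlighter as a HighlightBuilder object instead of a serialized BytesReference, callers hand over the builder directly and it travels over the wire via writeTo/readFrom. A rough sketch of the new usage; the highlighted field name "body" is a made-up example:

    SearchSourceBuilder source = new SearchSourceBuilder();
    source.highlighter(new HighlightBuilder()
            .field("body")                             // hypothetical field to highlight
            .order(HighlightBuilder.Order.SCORE));     // typed Order instead of a raw string
    HighlightBuilder highlight = source.highlighter(); // getter now returns the builder, not bytes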
@@ -20,7 +20,6 @@
 package org.elasticsearch.search.fetch.innerhits;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent {
         return this;
     }
-    public BytesReference highlighter() {
+    public HighlightBuilder highlighter() {
         return sourceBuilder().highlighter();
     }
@@ -29,6 +29,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.search.highlight.HighlightBuilder.Order;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -78,7 +80,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
     protected QueryBuilder<?> highlightQuery;
-    protected String order;
+    protected Order order;
     protected Boolean highlightFilter;
@@ -217,18 +219,26 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
     /**
      * The order of fragments per field. By default, ordered by the order in the
      * highlighted text. Can be <tt>score</tt>, which then it will be ordered
-     * by score of the fragments.
+     * by score of the fragments, or <tt>none</TT>.
+     */
+    public HB order(String order) {
+        return order(Order.fromString(order));
+    }
+    /**
+     * By default, fragments of a field are ordered by the order in the highlighted text.
+     * If set to {@link Order#SCORE}, this changes order to score of the fragments.
      */
     @SuppressWarnings("unchecked")
-    public HB order(String order) {
-        this.order = order;
+    public HB order(Order scoreOrdered) {
+        this.order = scoreOrdered;
         return (HB) this;
     }
     /**
-     * @return the value set by {@link #order(String)}
+     * @return the value set by {@link #order(Order)}
      */
-    public String order() {
+    public Order order() {
         return this.order;
     }
@@ -395,7 +405,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
             builder.field(HIGHLIGHT_QUERY_FIELD.getPreferredName(), highlightQuery);
         }
         if (order != null) {
-            builder.field(ORDER_FIELD.getPreferredName(), order);
+            builder.field(ORDER_FIELD.getPreferredName(), order.toString());
         }
         if (highlightFilter != null) {
             builder.field(HIGHLIGHT_FILTER_FIELD.getPreferredName(), highlightFilter);
@@ -458,7 +468,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
                 }
             } else if (token.isValue()) {
                 if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
-                    highlightBuilder.order(parser.text());
+                    highlightBuilder.order(Order.fromString(parser.text()));
                 } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
                     highlightBuilder.highlightFilter(parser.booleanValue());
                 } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
@@ -578,7 +588,9 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
         if (in.readBoolean()) {
             highlightQuery(in.readQuery());
         }
-        order(in.readOptionalString());
+        if (in.readBoolean()) {
+            order(Order.PROTOTYPE.readFrom(in));
+        }
         highlightFilter(in.readOptionalBoolean());
         forceSource(in.readOptionalBoolean());
         boundaryMaxScan(in.readOptionalVInt());
@@ -609,7 +621,11 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
         if (hasQuery) {
             out.writeQuery(highlightQuery);
         }
-        out.writeOptionalString(order);
+        boolean hasSetOrder = order != null;
+        out.writeBoolean(hasSetOrder);
+        if (hasSetOrder) {
+            order.writeTo(out);
+        }
         out.writeOptionalBoolean(highlightFilter);
         out.writeOptionalBoolean(forceSource);
         out.writeOptionalVInt(boundaryMaxScan);
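The old String-based setter is kept and simply funnels into the new enum overload, so the two calls below should end up equivalent; a small sketch only:

    HighlightBuilder byName = new HighlightBuilder().order("score");                       // goes through Order.fromString
    HighlightBuilder byEnum = new HighlightBuilder().order(HighlightBuilder.Order.SCORE);  // typed setter
    // byName.order() and byEnum.order() both return Order.SCORE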
@@ -44,6 +44,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Locale;
 import java.util.Objects;
 import java.util.Set;
@@ -308,9 +309,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
      */
     @SuppressWarnings({ "rawtypes", "unchecked" })
     private static void transferOptions(AbstractHighlighterBuilder highlighterBuilder, SearchContextHighlight.FieldOptions.Builder targetOptionsBuilder, QueryShardContext context) throws IOException {
-        targetOptionsBuilder.preTags(highlighterBuilder.preTags);
-        targetOptionsBuilder.postTags(highlighterBuilder.postTags);
-        targetOptionsBuilder.scoreOrdered("score".equals(highlighterBuilder.order));
+        if (highlighterBuilder.preTags != null) {
+            targetOptionsBuilder.preTags(highlighterBuilder.preTags);
+        }
+        if (highlighterBuilder.postTags != null) {
+            targetOptionsBuilder.postTags(highlighterBuilder.postTags);
+        }
+        if (highlighterBuilder.order != null) {
+            targetOptionsBuilder.scoreOrdered(highlighterBuilder.order == Order.SCORE);
+        }
         if (highlighterBuilder.highlightFilter != null) {
             targetOptionsBuilder.highlightFilter(highlighterBuilder.highlightFilter);
         }
@@ -326,9 +333,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
         if (highlighterBuilder.boundaryMaxScan != null) {
             targetOptionsBuilder.boundaryMaxScan(highlighterBuilder.boundaryMaxScan);
         }
-        targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
-        targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
-        targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
+        if (highlighterBuilder.boundaryChars != null) {
+            targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
+        }
+        if (highlighterBuilder.highlighterType != null) {
+            targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
+        }
+        if (highlighterBuilder.fragmenter != null) {
+            targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
+        }
         if (highlighterBuilder.noMatchSize != null) {
             targetOptionsBuilder.noMatchSize(highlighterBuilder.noMatchSize);
         }
@@ -338,7 +351,9 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
         if (highlighterBuilder.phraseLimit != null) {
             targetOptionsBuilder.phraseLimit(highlighterBuilder.phraseLimit);
         }
-        targetOptionsBuilder.options(highlighterBuilder.options);
+        if (highlighterBuilder.options != null) {
+            targetOptionsBuilder.options(highlighterBuilder.options);
+        }
         if (highlighterBuilder.highlightQuery != null) {
             targetOptionsBuilder.highlightQuery(highlighterBuilder.highlightQuery.toQuery(context));
         }
@@ -545,4 +560,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
             writeOptionsTo(out);
         }
     }
+    public enum Order implements Writeable<Order> {
+        NONE, SCORE;
+        static Order PROTOTYPE = NONE;
+        @Override
+        public Order readFrom(StreamInput in) throws IOException {
+            int ordinal = in.readVInt();
+            if (ordinal < 0 || ordinal >= values().length) {
+                throw new IOException("Unknown Order ordinal [" + ordinal + "]");
+            }
+            return values()[ordinal];
+        }
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeVInt(this.ordinal());
+        }
+        public static Order fromString(String order) {
+            if (order.toUpperCase(Locale.ROOT).equals(SCORE.name())) {
+                return Order.SCORE;
+            }
+            return NONE;
+        }
+        @Override
+        public String toString() {
+            return name().toLowerCase(Locale.ROOT);
+        }
+    }
 }
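Order implements Writeable, so its wire format is just the ordinal written as a vint (NONE = 0, SCORE = 1). A round trip using the stream helpers that appear elsewhere in this commit would look roughly like this; readFrom is called on an arbitrary instance here because the PROTOTYPE constant is package-private:

    BytesStreamOutput out = new BytesStreamOutput();
    HighlightBuilder.Order.SCORE.writeTo(out);                    // writes ordinal 1 as a vint
    StreamInput in = StreamInput.wrap(out.bytes());
    HighlightBuilder.Order order = HighlightBuilder.Order.NONE.readFrom(in);
    assert order == HighlightBuilder.Order.SCORE;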
@ -0,0 +1,76 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
/**
* Base class for delegating transport response to a transport channel
*/
public abstract class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
/**
* Convenience method for delegating an empty response to the provided channel
*/
public static TransportChannelResponseHandler<TransportResponse.Empty> emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) {
return new TransportChannelResponseHandler<TransportResponse.Empty>(logger, channel, extraInfoOnError) {
@Override
public TransportResponse.Empty newInstance() {
return TransportResponse.Empty.INSTANCE;
}
};
}
private final ESLogger logger;
private final TransportChannel channel;
private final String extraInfoOnError;
protected TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) {
this.logger = logger;
this.channel = channel;
this.extraInfoOnError = extraInfoOnError;
}
@Override
public void handleResponse(T response) {
try {
channel.sendResponse(response);
} catch (IOException e) {
handleException(new TransportException(e));
}
}
@Override
public void handleException(TransportException exp) {
try {
channel.sendResponse(exp);
} catch (IOException e) {
logger.debug("failed to send failure {}", e, extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")");
}
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
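A typical consumer of this handler is a transport action that forwards a request to another node and just relays the (empty) acknowledgement back to the channel it was originally received on. A hedged sketch; the action name, node and request variables are placeholders, and sendRequest is assumed to be the usual TransportService call that accepts a response handler:

    transportService.sendRequest(targetNode, "indices:admin/example_forward", request,
            TransportChannelResponseHandler.emptyResponseHandler(logger, channel, "forward to " + targetNode));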
@@ -26,7 +26,8 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterStateTaskConfig;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -36,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -205,142 +207,180 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
         }
     }
     class TribeClusterStateListener implements ClusterStateListener {
         private final String tribeName;
+        private final TribeNodeClusterStateTaskExecutor executor;
         TribeClusterStateListener(Node tribeNode) {
-            this.tribeName = tribeNode.settings().get(TRIBE_NAME);
+            String tribeName = tribeNode.settings().get(TRIBE_NAME);
+            this.tribeName = tribeName;
+            executor = new TribeNodeClusterStateTaskExecutor(tribeName);
         }
         @Override
         public void clusterChanged(final ClusterChangedEvent event) {
             logger.debug("[{}] received cluster event, [{}]", tribeName, event.source());
-            clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() {
-                @Override
-                public boolean runOnlyOnMaster() {
-                    return false;
+            clusterService.submitStateUpdateTask(
+                "cluster event from " + tribeName + ", " + event.source(),
+                event,
+                ClusterStateTaskConfig.build(Priority.NORMAL),
+                executor,
+                (source, t) -> logger.warn("failed to process [{}]", t, source));
+        }
+    }
class TribeNodeClusterStateTaskExecutor implements ClusterStateTaskExecutor<ClusterChangedEvent> {
private final String tribeName;
TribeNodeClusterStateTaskExecutor(String tribeName) {
this.tribeName = tribeName;
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public BatchResult<ClusterChangedEvent> execute(ClusterState currentState, List<ClusterChangedEvent> tasks) throws Exception {
ClusterState accumulator = ClusterState.builder(currentState).build();
BatchResult.Builder<ClusterChangedEvent> builder = BatchResult.builder();
try {
// we only need to apply the latest cluster state update
accumulator = applyUpdate(accumulator, tasks.get(tasks.size() - 1));
builder.successes(tasks);
} catch (Throwable t) {
builder.failures(tasks, t);
}
return builder.build(accumulator);
}
private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) {
boolean clusterStateChanged = false;
ClusterState tribeState = task.state();
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
// -- merge nodes
// go over existing nodes, and see if they need to be removed
for (DiscoveryNode discoNode : currentState.nodes()) {
String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
if (tribeState.nodes().get(discoNode.id()) == null) {
clusterStateChanged = true;
logger.info("[{}] removing node [{}]", tribeName, discoNode);
nodes.remove(discoNode.id());
}
} }
}
// go over tribe nodes, and see if they need to be added
for (DiscoveryNode tribe : tribeState.nodes()) {
if (currentState.nodes().get(tribe.id()) == null) {
// a new node, add it, but also add the tribe name to the attributes
Map<String, String> tribeAttr = new HashMap<>();
for (ObjectObjectCursor<String, String> attr : tribe.attributes()) {
tribeAttr.put(attr.key, attr.value);
}
tribeAttr.put(TRIBE_NAME, tribeName);
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode);
nodes.put(discoNode);
}
}
@Override // -- merge metadata
public ClusterState execute(ClusterState currentState) throws Exception { ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
ClusterState tribeState = event.state(); MetaData.Builder metaData = MetaData.builder(currentState.metaData());
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
// -- merge nodes // go over existing indices, and see if they need to be removed
// go over existing nodes, and see if they need to be removed for (IndexMetaData index : currentState.metaData()) {
for (DiscoveryNode discoNode : currentState.nodes()) { String markedTribeName = index.getSettings().get(TRIBE_NAME);
String markedTribeName = discoNode.attributes().get(TRIBE_NAME); if (markedTribeName != null && markedTribeName.equals(tribeName)) {
if (markedTribeName != null && markedTribeName.equals(tribeName)) { IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex());
if (tribeState.nodes().get(discoNode.id()) == null) { clusterStateChanged = true;
logger.info("[{}] removing node [{}]", tribeName, discoNode); if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) {
nodes.remove(discoNode.id()); logger.info("[{}] removing index [{}]", tribeName, index.getIndex());
} removeIndex(blocks, metaData, routingTable, index);
} } else {
// always make sure to update the metadata and routing table, in case
// there are changes in them (new mapping, shards moving from initializing to started)
routingTable.add(tribeState.routingTable().index(index.getIndex()));
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
} }
// go over tribe nodes, and see if they need to be added }
for (DiscoveryNode tribe : tribeState.nodes()) { }
if (currentState.nodes().get(tribe.id()) == null) { // go over tribe one, and see if they need to be added
// a new node, add it, but also add the tribe name to the attributes for (IndexMetaData tribeIndex : tribeState.metaData()) {
Map<String, String> tribeAttr = new HashMap<>(); // if there is no routing table yet, do nothing with it...
for (ObjectObjectCursor<String, String> attr : tribe.attributes()) { IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex());
tribeAttr.put(attr.key, attr.value); if (table == null) {
} continue;
tribeAttr.put(TRIBE_NAME, tribeName); }
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
logger.info("[{}] adding node [{}]", tribeName, discoNode); if (indexMetaData == null) {
nodes.put(discoNode); if (!droppedIndices.contains(tribeIndex.getIndex())) {
} // a new index, add it, and add the tribe name as a setting
clusterStateChanged = true;
logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
} }
} else {
// -- merge metadata String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME);
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); if (!tribeName.equals(existingFromTribe)) {
MetaData.Builder metaData = MetaData.builder(currentState.metaData()); // we have a potential conflict on index names, decide what to do...
RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); if (ON_CONFLICT_ANY.equals(onConflict)) {
// go over existing indices, and see if they need to be removed // we chose any tribe, carry on
for (IndexMetaData index : currentState.metaData()) { } else if (ON_CONFLICT_DROP.equals(onConflict)) {
String markedTribeName = index.getSettings().get(TRIBE_NAME); // drop the indices, there is a conflict
if (markedTribeName != null && markedTribeName.equals(tribeName)) { clusterStateChanged = true;
IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { removeIndex(blocks, metaData, routingTable, tribeIndex);
logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); droppedIndices.add(tribeIndex.getIndex());
removeIndex(blocks, metaData, routingTable, index); } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
} else { // on conflict, prefer a tribe...
// always make sure to update the metadata and routing table, in case String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
// there are changes in them (new mapping, shards moving from initializing to started) if (tribeName.equals(preferredTribeName)) {
routingTable.add(tribeState.routingTable().index(index.getIndex())); // the new one is hte preferred one, replace...
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); clusterStateChanged = true;
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
} removeIndex(blocks, metaData, routingTable, tribeIndex);
}
}
// go over tribe one, and see if they need to be added
for (IndexMetaData tribeIndex : tribeState.metaData()) {
// if there is no routing table yet, do nothing with it...
IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex());
if (table == null) {
continue;
}
final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
if (indexMetaData == null) {
if (!droppedIndices.contains(tribeIndex.getIndex())) {
// a new index, add it, and add the tribe name as a setting
logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
} } // else: either the existing one is the preferred one, or we haven't seen one, carry on
} else {
String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME);
if (!tribeName.equals(existingFromTribe)) {
// we have a potential conflict on index names, decide what to do...
if (ON_CONFLICT_ANY.equals(onConflict)) {
// we chose any tribe, carry on
} else if (ON_CONFLICT_DROP.equals(onConflict)) {
// drop the indices, there is a conflict
logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
droppedIndices.add(tribeIndex.getIndex());
} else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
// on conflict, prefer a tribe...
String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
if (tribeName.equals(preferredTribeName)) {
// the new one is hte preferred one, replace...
logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
} // else: either the existing one is the preferred one, or we haven't seen one, carry on
}
}
} }
} }
return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build();
} }
}
private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { if (!clusterStateChanged) {
metaData.remove(index.getIndex()); return currentState;
routingTable.remove(index.getIndex()); } else {
blocks.removeIndexBlocks(index.getIndex()); return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build();
} }
}
private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) {
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); metaData.remove(index.getIndex());
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); routingTable.remove(index.getIndex());
routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); blocks.removeIndexBlocks(index.getIndex());
if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { }
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
}
if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) {
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
}
if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) {
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
}
}
@Override private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
public void onFailure(String source, Throwable t) { Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
logger.warn("failed to process [{}]", t, source); metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
} routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));
}); if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) {
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
}
if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) {
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
}
if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) {
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
}
} }
} }
} }
@@ -50,6 +50,7 @@ OFFICIAL PLUGINS
 - mapper-murmur3
 - mapper-size
 - repository-azure
+- repository-hdfs
 - repository-s3
 - store-smb
@@ -17,16 +17,20 @@
  * under the License.
  */
-package org.elasticsearch.indices.flush;
+package org.elasticsearch.action.admin.indices.flush;
 import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.ObjectIntMap;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.TestShardRouting;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.flush.IndicesSyncedFlushResult.ShardCounts;
-import org.elasticsearch.indices.flush.SyncedFlushService.SyncedFlushResponse;
+import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
+import org.elasticsearch.indices.flush.SyncedFlushService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
@@ -42,14 +46,11 @@ import static org.hamcrest.Matchers.hasSize;
 public class SyncedFlushUnitTests extends ESTestCase {
     private static class TestPlan {
-        public ShardCounts totalCounts;
-        public Map<String, ShardCounts> countsPerIndex = new HashMap<>();
+        public SyncedFlushResponse.ShardCounts totalCounts;
+        public Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
         public ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
-        public IndicesSyncedFlushResult result;
+        public SyncedFlushResponse result;
     }
     public void testIndicesSyncedFlushResult() throws IOException {
@ -76,6 +77,56 @@ public class SyncedFlushUnitTests extends ESTestCase {
} }
} }
public void testResponseStreaming() throws IOException {
final TestPlan testPlan = createTestPlan();
assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total));
assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful));
assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed));
assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
BytesStreamOutput out = new BytesStreamOutput();
testPlan.result.writeTo(out);
StreamInput in = StreamInput.wrap(out.bytes());
SyncedFlushResponse readResponse = new SyncedFlushResponse();
readResponse.readFrom(in);
assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total));
assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful));
assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed));
assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
assertThat(readResponse.shardsResultPerIndex.size(), equalTo(testPlan.result.getShardsResultPerIndex().size()));
for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : readResponse.getShardsResultPerIndex().entrySet()) {
List<ShardsSyncedFlushResult> originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey());
assertNotNull(originalShardsResults);
List<ShardsSyncedFlushResult> readShardsResults = entry.getValue();
assertThat(readShardsResults.size(), equalTo(originalShardsResults.size()));
for (int i = 0; i < readShardsResults.size(); i++) {
ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i);
ShardsSyncedFlushResult readShardResult = readShardsResults.get(i);
assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason()));
assertThat(originalShardResult.failed(), equalTo(readShardResult.failed()));
assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId()));
assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards()));
assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId()));
assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards()));
assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size()));
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.failedShards().entrySet()) {
SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey());
assertNotNull(readShardResponse);
SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
}
assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size()));
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.shardResponses().entrySet()) {
SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses().get(shardEntry.getKey());
assertNotNull(readShardResponse);
SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
}
}
}
}
private void assertShardCount(String name, Map<String, Object> header, ShardCounts expectedCounts) { private void assertShardCount(String name, Map<String, Object> header, ShardCounts expectedCounts) {
assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total));
assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful));
@ -105,32 +156,33 @@ public class SyncedFlushUnitTests extends ESTestCase {
failures++; failures++;
shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
} else { } else {
Map<ShardRouting, SyncedFlushResponse> shardResponses = new HashMap<>(); Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
for (int copy = 0; copy < replicas + 1; copy++) { for (int copy = 0; copy < replicas + 1; copy++) {
final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null, final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null,
copy == 0, ShardRoutingState.STARTED, 0); copy == 0, ShardRoutingState.STARTED, 0);
if (randomInt(5) < 2) { if (randomInt(5) < 2) {
// shard copy failure // shard copy failure
failed++; failed++;
failures++; failures++;
shardResponses.put(shardRouting, new SyncedFlushResponse("copy failure " + shardId)); shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
} else { } else {
successful++; successful++;
shardResponses.put(shardRouting, new SyncedFlushResponse()); shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse());
} }
} }
shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
} }
} }
indicesResults.put(index, shardsResults); indicesResults.put(index, shardsResults);
testPlan.countsPerIndex.put(index, new ShardCounts(shards * (replicas + 1), successful, failed)); testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
testPlan.expectedFailuresPerIndex.put(index, failures); testPlan.expectedFailuresPerIndex.put(index, failures);
totalFailed += failed; totalFailed += failed;
totalShards += shards * (replicas + 1); totalShards += shards * (replicas + 1);
totalSuccesful += successful; totalSuccesful += successful;
} }
testPlan.result = new IndicesSyncedFlushResult(indicesResults); testPlan.result = new SyncedFlushResponse(indicesResults);
testPlan.totalCounts = new ShardCounts(totalShards, totalSuccesful, totalFailed); testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed);
return testPlan; return testPlan;
} }
} }
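The new testResponseStreaming method follows the usual round-trip pattern for Streamable responses: write the original into an in-memory buffer, read it back into a fresh instance, then compare field by field. Reduced to its core, the pattern is roughly:

    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);                        // original is the SyncedFlushResponse under test
    SyncedFlushResponse copy = new SyncedFlushResponse();
    copy.readFrom(StreamInput.wrap(out.bytes()));
    // copy must now report the same totals and per-index shard results as original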
@ -22,7 +22,6 @@ package org.elasticsearch.common.xcontent.builder;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.io.FastCharArrayWriter;
import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -39,6 +38,7 @@ import java.nio.file.Path;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Calendar; import java.util.Calendar;
import java.util.Collections;
import java.util.Date; import java.util.Date;
import java.util.GregorianCalendar; import java.util.GregorianCalendar;
import java.util.HashMap; import java.util.HashMap;
@ -51,9 +51,6 @@ import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConvers
import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE; import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
/**
*
*/
public class XContentBuilderTests extends ESTestCase { public class XContentBuilderTests extends ESTestCase {
public void testPrettyWithLfAtEnd() throws Exception { public void testPrettyWithLfAtEnd() throws Exception {
ByteArrayOutputStream os = new ByteArrayOutputStream(); ByteArrayOutputStream os = new ByteArrayOutputStream();
@ -350,4 +347,33 @@ public class XContentBuilderTests extends ESTestCase {
"}", string.trim()); "}", string.trim());
} }
public void testWriteMapWithNullKeys() throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
try {
builder.map(Collections.singletonMap(null, "test"));
fail("write map should have failed");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("field name cannot be null"));
}
}
public void testWriteMapValueWithNullKeys() throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
try {
builder.value(Collections.singletonMap(null, "test"));
fail("write map should have failed");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("field name cannot be null"));
}
}
public void testWriteFieldMapWithNullKeys() throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
try {
builder.field("map", Collections.singletonMap(null, "test"));
fail("write map should have failed");
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("field name cannot be null"));
}
}
} }
@@ -369,7 +369,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
             ensureGreen();
         } else {
             logger.info("--> trying to sync flush");
-            assertEquals(SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").failedShards(), 0);
+            assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0);
             assertSyncIdsNotNull();
         }
@ -0,0 +1,161 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gateway;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.indices.flush.SyncedFlushUtil;
import org.elasticsearch.indices.recovery.RecoveryState;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.ESIntegTestCase.client;
import static org.elasticsearch.test.ESIntegTestCase.internalCluster;
import static org.elasticsearch.test.ESTestCase.randomBoolean;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
/**
* Test of file reuse on recovery shared between integration tests and backwards
* compatibility tests.
*/
public class ReusePeerRecoverySharedTest {
/**
* Test peer reuse on recovery. This is shared between RecoverFromGatewayIT
* and RecoveryBackwardsCompatibilityIT.
*
* @param indexSettings
* settings for the index to test
* @param restartCluster
* runnable that will restart the cluster under test
* @param logger
* logger for logging
* @param useSyncIds
* should this use synced flush? can't use synced flush in the bwc
* tests
*/
public static void testCase(Settings indexSettings, Runnable restartCluster, ESLogger logger, boolean useSyncIds) {
/*
* prevent any rebalance actions during the peer recovery if we run into
* a relocation the reuse count will be 0 and this fails the test. We
* are testing here if we reuse the files on disk after full restarts
* for replicas.
*/
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put(indexSettings)
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)));
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get();
logger.info("--> indexing docs");
for (int i = 0; i < 1000; i++) {
client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
if ((i % 200) == 0) {
client().admin().indices().prepareFlush().execute().actionGet();
}
}
if (randomBoolean()) {
client().admin().indices().prepareFlush().execute().actionGet();
}
logger.info("--> running cluster health");
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get();
// just wait for merges
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get();
client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get();
if (useSyncIds == false) {
logger.info("--> disabling allocation while the cluster is shut down");
// Disable allocations while we are closing nodes
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)).get();
logger.info("--> full cluster restart");
restartCluster.run();
logger.info("--> waiting for cluster to return to green after first shutdown");
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get();
} else {
logger.info("--> trying to sync flush");
assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0);
assertSyncIdsNotNull();
}
logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? "" : " a second time");
// Disable allocations while we are closing nodes
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE))
.get();
logger.info("--> full cluster restart");
restartCluster.run();
logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ? "" : "second ");
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get();
if (useSyncIds) {
assertSyncIdsNotNull();
}
RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
long recovered = 0;
for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) {
if (file.name().startsWith("segments")) {
recovered += file.length();
}
}
if (!recoveryState.getPrimary() && (useSyncIds == false)) {
logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(),
recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered));
assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0l));
// we have to recover the segments file since we commit the translog ID on engine startup
assertThat("all bytes should be reused except of the segments file", recoveryState.getIndex().reusedBytes(),
equalTo(recoveryState.getIndex().totalBytes() - recovered));
assertThat("no files should be recovered except of the segments file", recoveryState.getIndex().recoveredFileCount(),
equalTo(1));
assertThat("all files should be reused except of the segments file", recoveryState.getIndex().reusedFileCount(),
equalTo(recoveryState.getIndex().totalFileCount() - 1));
assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0));
} else {
if (useSyncIds && !recoveryState.getPrimary()) {
logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}",
recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
}
assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0l));
assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes()));
assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0));
assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount()));
}
}
}
public static void assertSyncIdsNotNull() {
IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
}
}
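A concrete integration test drives this helper by supplying the index settings, a callback that restarts the cluster, and the sync-id flag. The sketch below is illustrative only: it assumes the usual ESIntegTestCase plumbing (internalCluster(), the inherited logger) and is not part of this change.

// Hypothetical caller of the shared helper above; names and settings are illustrative.
public void testReusePeerRecoveryWithSyncedFlush() {
    Runnable restartCluster = () -> {
        try {
            internalCluster().fullRestart(); // bounce every node in the test cluster
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };
    // useSyncIds = true: replicas are expected to recover via sync ids rather than file reuse
    testCase(Settings.EMPTY, restartCluster, logger, true);
}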

View File

@ -31,7 +31,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
@ -39,7 +38,6 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
-import java.util.Arrays;
import java.util.List;
import java.util.Map;
@ -321,9 +319,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
DocumentMapper docMapperAfter = parser.parse(mappingAfter);
-MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true, false);
-assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+docMapperBefore.merge(docMapperAfter.mapping(), true, false);
docMapperBefore.merge(docMapperAfter.mapping(), false, false);

View File

@ -23,7 +23,6 @@ import org.apache.lucene.analysis.*;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
@ -60,13 +59,11 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
DocumentMapper stage2 = parser.parse(stage2Mapping);
-MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false);
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+stage1.merge(stage2.mapping(), true, false);
// Just simulated so merge hasn't happened yet
assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword"));
-mergeResult = stage1.merge(stage2.mapping(), false, false);
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+stage1.merge(stage2.mapping(), false, false);
// Just simulated so merge hasn't happened yet
assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard"));
}
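In the merge calls above, the two boolean arguments are simulate and updateAllTypes, in that order: a simulated merge only validates that the incoming mapping is compatible and leaves the live mapper untouched, which is why the analyzer still reads "keyword" after the first call. Paraphrased from the call sites (not new code in this change):

// simulate = true: validate the incoming mapping only, do not apply it
stage1.merge(stage2.mapping(), true, false);
// simulate = false: actually fold the new mapping into stage1
stage1.merge(stage2.mapping(), false, false);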

View File

@ -371,9 +371,8 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase {
Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper);
assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy"));
-MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false, false);
-assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false));
+defaultMapper.merge(mergeMapper.mapping(), false, false);
assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class)));
DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field");

View File

@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
@ -96,9 +95,6 @@ public class ExternalMapper extends FieldMapper {
@Override
public ExternalMapper build(BuilderContext context) {
-ContentPath.Type origPathType = context.path().pathType();
-context.path().pathType(ContentPath.Type.FULL);
context.path().add(name);
BinaryFieldMapper binMapper = binBuilder.build(context);
BooleanFieldMapper boolMapper = boolBuilder.build(context);
@ -108,7 +104,6 @@ public class ExternalMapper extends FieldMapper {
FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context);
context.path().remove();
-context.path().pathType(origPathType);
setupFieldType(context);
return new ExternalMapper(name, fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, shapeMapper, stringMapper,
@ -219,7 +214,7 @@ public class ExternalMapper extends FieldMapper {
}
@Override
-public void merge(Mapper mergeWith, MergeResult mergeResult) {
+protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// ignore this for now
}

View File

@ -28,7 +28,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
@ -66,9 +65,9 @@ public class ExternalMetadataMapper extends MetadataFieldMapper {
}
@Override
-public void merge(Mapper mergeWith, MergeResult mergeResult) {
+public void doMerge(Mapper mergeWith, boolean updateAllTypes) {
if (!(mergeWith instanceof ExternalMetadataMapper)) {
-mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this);
+throw new IllegalArgumentException("Trying to merge " + mergeWith + " with " + this);
}
}
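Both hunks above show the new merge contract introduced by this commit: instead of accumulating problems in a MergeResult, doMerge signals an incompatible mapping by throwing IllegalArgumentException. A minimal sketch of an override following that contract (the class name is invented for illustration):

// Hypothetical mapper override mirroring the pattern above; MyMetadataMapper is not a real class.
@Override
public void doMerge(Mapper mergeWith, boolean updateAllTypes) {
    if (!(mergeWith instanceof MyMetadataMapper)) {
        // incompatible mappings now fail fast instead of collecting conflicts
        throw new IllegalArgumentException("Trying to merge " + mergeWith + " with " + this);
    }
}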

View File

@ -87,7 +87,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase {
.startObject("f") .startObject("f")
.field("type", ExternalMapperPlugin.EXTERNAL_UPPER) .field("type", ExternalMapperPlugin.EXTERNAL_UPPER)
.startObject("fields") .startObject("fields")
.startObject("f") .startObject("g")
.field("type", "string") .field("type", "string")
.field("store", "yes") .field("store", "yes")
.startObject("fields") .startObject("fields")
@ -107,7 +107,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase {
refresh();
SearchResponse response = client().prepareSearch("test-idx")
-.setQuery(QueryBuilders.termQuery("f.f.raw", "FOO BAR"))
+.setQuery(QueryBuilders.termQuery("f.g.raw", "FOO BAR"))
.execute().actionGet();
assertThat(response.getHits().totalHits(), equalTo((long) 1));

View File

@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.test.ESSingleNodeTestCase;

View File

@ -30,17 +30,13 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.isIn;
public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase {
public void testDefaultConfiguration() throws IOException {

View File

@ -29,7 +29,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
@ -39,6 +38,7 @@ import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ -59,15 +59,12 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().endObject().string();
DocumentMapper stage2 = parser.parse(stage2Mapping);
-MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false);
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+stage1.merge(stage2.mapping(), true, false);
// since we are simulating, we should not have the age mapping
assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue());
assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue());
// now merge, don't simulate
-mergeResult = stage1.merge(stage2.mapping(), false, false);
-// there is still merge failures
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+stage1.merge(stage2.mapping(), false, false);
// but we have the age in
assertThat(stage1.mappers().smartNameFieldMapper("age"), notNullValue());
assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), notNullValue());
@ -83,8 +80,7 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase {
DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping);
assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
-MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false, false);
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+mapper.merge(withDynamicMapper.mapping(), false, false);
assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
}
@ -99,14 +95,19 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().endObject().string();
DocumentMapper nestedMapper = parser.parse(nestedMapping);
-MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true, false);
-assertThat(mergeResult.hasConflicts(), equalTo(true));
-assertThat(mergeResult.buildConflicts().length, equalTo(1));
-assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested"));
-mergeResult = nestedMapper.merge(objectMapper.mapping(), true, false);
-assertThat(mergeResult.buildConflicts().length, equalTo(1));
-assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested"));
+try {
+objectMapper.merge(nestedMapper.mapping(), true, false);
+fail();
+} catch (IllegalArgumentException e) {
+assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from non-nested to nested"));
+}
+try {
+nestedMapper.merge(objectMapper.mapping(), true, false);
+fail();
+} catch (IllegalArgumentException e) {
+assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from nested to non-nested"));
+}
}
public void testMergeSearchAnalyzer() throws Exception {
@ -122,9 +123,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase {
DocumentMapper changed = parser.parse(mapping2);
assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace"));
-MergeResult mergeResult = existing.merge(changed.mapping(), false, false);
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+existing.merge(changed.mapping(), false, false);
assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword"));
}
@ -141,9 +141,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase {
DocumentMapper changed = parser.parse(mapping2);
assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace"));
-MergeResult mergeResult = existing.merge(changed.mapping(), false, false);
-assertThat(mergeResult.hasConflicts(), equalTo(false));
+existing.merge(changed.mapping(), false, false);
assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard"));
assertThat(((StringFieldMapper) (existing.mappers().getMapper("field"))).getIgnoreAbove(), equalTo(14));
}

View File

@ -27,15 +27,11 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.test.ESSingleNodeTestCase;
-import java.util.Arrays;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ -62,8 +58,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase {
mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json");
DocumentMapper docMapper2 = parser.parse(mapping);
-MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true, false);
-assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+docMapper.merge(docMapper2.mapping(), true, false);
docMapper.merge(docMapper2.mapping(), false, false);
@ -84,8 +79,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase {
mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json");
DocumentMapper docMapper3 = parser.parse(mapping);
-mergeResult = docMapper.merge(docMapper3.mapping(), true, false);
-assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+docMapper.merge(docMapper3.mapping(), true, false);
docMapper.merge(docMapper3.mapping(), false, false);
@ -100,8 +94,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase {
mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json");
DocumentMapper docMapper4 = parser.parse(mapping);
-mergeResult = docMapper.merge(docMapper4.mapping(), true, false);
-assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+docMapper.merge(docMapper4.mapping(), true, false);
docMapper.merge(docMapper4.mapping(), false, false);

View File

@ -34,11 +34,9 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
import java.util.Map;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class DefaultSourceMappingTests extends ESSingleNodeTestCase {
@ -194,13 +192,18 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase {
void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... conflicts) throws IOException {
DocumentMapper docMapper = parser.parse(mapping1);
docMapper = parser.parse(docMapper.mappingSource().string());
-MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false);
-List<String> expectedConflicts = new ArrayList<>(Arrays.asList(conflicts));
-for (String conflict : mergeResult.buildConflicts()) {
-assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict));
+if (conflicts.length == 0) {
+docMapper.merge(parser.parse(mapping2).mapping(), true, false);
+} else {
+try {
+docMapper.merge(parser.parse(mapping2).mapping(), true, false);
+fail();
+} catch (IllegalArgumentException e) {
+for (String conflict : conflicts) {
+assertThat(e.getMessage(), containsString(conflict));
+}
+}
}
-assertTrue("missing conflicts: " + Arrays.toString(expectedConflicts.toArray()), expectedConflicts.isEmpty());
}
public void testEnabledNotUpdateable() throws Exception {
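After the rewrite, assertConflicts keeps its signature but changes meaning: with no expected strings it asserts that the merge succeeds, and with expected strings it asserts that the thrown IllegalArgumentException mentions each of them. A hedged example call, with placeholder mappings and message rather than the ones used in this test:

// Illustrative only; the mapping JSON and the conflict message are placeholders,
// and parser is the test's DocumentMapperParser.
String before = "{\"type\":{\"_source\":{\"enabled\":true}}}";
String after = "{\"type\":{\"_source\":{\"enabled\":false}}}";
assertConflicts(before, after, parser, "Cannot update enabled setting for [_source]");
// no expected conflicts -> the merge must not throw
assertConflicts(before, before, parser);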

View File

@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
@ -493,8 +492,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase {
String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject()
.endObject().endObject().endObject().endObject().string();
-MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false);
-assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts());
+defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false);
doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
.startObject()

View File

@ -42,7 +42,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
@ -515,8 +514,7 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
.startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject() .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject()
.endObject().endObject().string(); .endObject().endObject().string();
MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false, false); docMapper.merge(parser.parse(mapping).mapping(), false, false);
assertThat(mergeResult.buildConflicts().length, equalTo(0));
assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER)); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER));
assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("array")); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("array"));
} }
@ -618,9 +616,9 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
.field("index", indexValues.remove(randomInt(2))) .field("index", indexValues.remove(randomInt(2)))
.endObject() .endObject()
.endObject().endObject().string(); .endObject().endObject().string();
DocumentMapperParser parser = createIndex("test", BWC_SETTINGS).mapperService().documentMapperParser(); MapperService mapperService = createIndex("test", BWC_SETTINGS).mapperService();
DocumentMapper docMapper = parser.parse(mapping); mapperService.merge("type", new CompressedXContent(mapping), true, false);
mapping = XContentFactory.jsonBuilder().startObject() mapping = XContentFactory.jsonBuilder().startObject()
.startObject("type") .startObject("type")
.startObject("_timestamp") .startObject("_timestamp")
@ -628,18 +626,11 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
.endObject()
.endObject().endObject().string();
-MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false);
-List<String> expectedConflicts = new ArrayList<>();
-expectedConflicts.add("mapper [_timestamp] has different [index] values");
-expectedConflicts.add("mapper [_timestamp] has different [tokenize] values");
-if (indexValues.get(0).equals("not_analyzed") == false) {
-// if the only index value left is not_analyzed, then the doc values setting will be the same, but in the
-// other two cases, it will change
-expectedConflicts.add("mapper [_timestamp] has different [doc_values] values");
-}
-for (String conflict : mergeResult.buildConflicts()) {
-assertThat(conflict, isIn(expectedConflicts));
+try {
+mapperService.merge("type", new CompressedXContent(mapping), false, false);
+fail();
+} catch (IllegalArgumentException e) {
+assertThat(e.getMessage(), containsString("mapper [_timestamp] has different [index] values"));
}
}
@ -686,10 +677,15 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
void assertConflict(String mapping1, String mapping2, DocumentMapperParser parser, String conflict) throws IOException {
DocumentMapper docMapper = parser.parse(mapping1);
docMapper = parser.parse(docMapper.mappingSource().string());
-MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false);
-assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 0 : 1));
-if (conflict != null) {
-assertThat(mergeResult.buildConflicts()[0], containsString(conflict));
+if (conflict == null) {
+docMapper.merge(parser.parse(mapping2).mapping(), true, false);
+} else {
+try {
+docMapper.merge(parser.parse(mapping2).mapping(), true, false);
+fail();
+} catch (IllegalArgumentException e) {
+assertThat(e.getMessage(), containsString(conflict));
+}
}
}

Some files were not shown because too many files have changed in this diff.