Merge branch 'master' into new_index_settings

Simon Willnauer 2016-01-19 10:13:48 +01:00
commit fbfa9f4925
206 changed files with 2050 additions and 2510 deletions

View File

@@ -46,9 +46,9 @@ class ClusterFormationTasks {
/**
* Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
*
* Returns an object that will resolve at execution time of the given task to a uri for the cluster.
* Returns a NodeInfo object for the first node in the cluster.
*/
static Object setup(Project project, Task task, ClusterConfiguration config) {
static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
if (task.getEnabled() == false) {
// no need to add cluster formation tasks if the task won't run!
return
@@ -66,7 +66,7 @@ class ClusterFormationTasks {
task.dependsOn(wait)
// delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
return "${-> nodes[0].transportUri()}"
return nodes[0]
}
/** Adds a dependency on the given distribution */

View File

@@ -20,7 +20,6 @@ package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.GradleException
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin
@@ -61,8 +60,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
// this must run after all projects have been configured, so we know any project
// references can be accessed as fully configured projects
project.gradle.projectsEvaluated {
Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
systemProperty('tests.cluster', clusterUri)
NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
systemProperty('tests.cluster', "${-> node.transportUri()}")
}
}
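
The GString closures above, such as "${-> node.httpUri()}", delay resolution deliberately: the URI is computed when the system property is actually read, after the cluster has started, not when the task is configured. A minimal Java sketch of the same idea, with a hypothetical NodeInfo stand-in for the Gradle class:

import java.util.function.Supplier;

// Hypothetical stand-in for the Gradle NodeInfo object used above.
class NodeInfo {
    private String httpUri; // only known once the node has started

    void started(String uri) { this.httpUri = uri; }

    String httpUri() { return httpUri; }
}

class LazySysprop {
    public static void main(String[] args) {
        NodeInfo node = new NodeInfo();
        // Capture the call, not the value: nothing is resolved yet.
        Supplier<String> testsRestCluster = () -> node.httpUri();
        node.started("localhost:9200"); // the cluster comes up later
        // Only now is the URI read, so it reflects the started node.
        System.out.println(testsRestCluster.get()); // prints localhost:9200
    }
}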

View File

@@ -200,7 +200,7 @@ public class ActionModule extends AbstractModule {
private final Map<String, ActionEntry> actions = new HashMap<>();
private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();
static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
public final GenericAction<Request, Response> action;
public final Class<? extends TransportAction<Request, Response>> transportAction;
public final Class[] supportTransportActions;
@@ -229,7 +229,7 @@ public class ActionModule extends AbstractModule {
* @param <Request> The request type.
* @param <Response> The response type.
*/
public <Request extends ActionRequest, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
}
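
The new bound "Request extends ActionRequest<Request>" is the recurring change in this commit: ActionRequest is self-referentially ("F-bounded") generic, so methods declared on the base class can return the concrete subtype. A minimal sketch of the pattern, with illustrative names rather than the actual Elasticsearch classes:

// F-bounded base: R is always "the concrete subclass itself".
abstract class Req<R extends Req<R>> {
    @SuppressWarnings("unchecked")
    R timeout(long millis) {
        // ... store the timeout ...
        return (R) this; // returns the concrete subtype, so calls can chain
    }
}

class SearchLikeReq extends Req<SearchLikeReq> {
    SearchLikeReq query(String q) { return this; }
}

class FBoundedDemo {
    public static void main(String[] args) {
        // No cast needed: timeout() already returns SearchLikeReq.
        SearchLikeReq r = new SearchLikeReq().timeout(500).query("*");
    }
}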

View File

@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.refresh;
import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -37,7 +37,7 @@ import java.util.List;
/**
* Refresh action.
*/
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, ReplicationRequest, ReplicationResponse> {
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {
@Inject
public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
@@ -53,8 +53,8 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
}
@Override
protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
return new ReplicationRequest(request, shardId);
protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
return new BasicReplicationRequest(request, shardId);
}
@Override

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.indices.refresh;
import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
@@ -41,7 +41,7 @@ import org.elasticsearch.transport.TransportService;
/**
*
*/
public class TransportShardRefreshAction extends TransportReplicationAction<ReplicationRequest, ReplicationRequest, ReplicationResponse> {
public class TransportShardRefreshAction extends TransportReplicationAction<BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> {
public static final String NAME = RefreshAction.NAME + "[s]";
@@ -51,7 +51,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, ReplicationRequest::new, ReplicationRequest::new, ThreadPool.Names.REFRESH);
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
}
@Override
@@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
}
@Override
protected Tuple<ReplicationResponse, ReplicationRequest> shardOperationOnPrimary(MetaData metaData, ReplicationRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.refresh("api");
logger.trace("{} refresh request executed on primary", indexShard.shardId());
@@ -68,7 +68,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
}
@Override
protected void shardOperationOnReplica(ReplicationRequest request) {
protected void shardOperationOnReplica(BasicReplicationRequest request) {
final ShardId shardId = request.shardId();
IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
indexShard.refresh("api");

View File

@@ -179,7 +179,7 @@ public class BulkProcessor implements Closeable {
private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture scheduledFuture;
private final ScheduledFuture<?> scheduledFuture;
private final AtomicLong executionIdGen = new AtomicLong();
@@ -250,24 +250,24 @@ public class BulkProcessor implements Closeable {
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public BulkProcessor add(IndexRequest request) {
return add((ActionRequest) request);
return add((ActionRequest<?>) request);
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*/
public BulkProcessor add(DeleteRequest request) {
return add((ActionRequest) request);
return add((ActionRequest<?>) request);
}
/**
* Adds either a delete or an index request.
*/
public BulkProcessor add(ActionRequest request) {
public BulkProcessor add(ActionRequest<?> request) {
return add(request, null);
}
public BulkProcessor add(ActionRequest request, @Nullable Object payload) {
public BulkProcessor add(ActionRequest<?> request, @Nullable Object payload) {
internalAdd(request, payload);
return this;
}
@@ -282,7 +282,7 @@ public class BulkProcessor implements Closeable {
}
}
private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) {
private synchronized void internalAdd(ActionRequest<?> request, @Nullable Object payload) {
ensureOpen();
bulkRequest.add(request, payload);
executeIfNeeded();
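
Note that the casts change from the raw (ActionRequest) to the wildcard (ActionRequest<?>): a wildcard means "a request of some unknown self-type" and keeps generic type checking enabled, whereas a raw type silently turns it off. A short sketch, again with illustrative stand-in types:

import java.util.ArrayList;
import java.util.List;

// Stand-ins for the self-typed request hierarchy.
abstract class AnyReq<R extends AnyReq<R>> {}
class IndexReq extends AnyReq<IndexReq> {}
class DeleteReq extends AnyReq<DeleteReq> {}

class WildcardDemo {
    // AnyReq<?> accepts every concrete request type without falling back to
    // the raw type, so the compiler keeps checking what goes into the list.
    static final List<AnyReq<?>> requests = new ArrayList<>();

    static void add(AnyReq<?> request) {
        requests.add(request);
    }

    public static void main(String[] args) {
        add(new IndexReq());
        add(new DeleteReq()); // both self-types fit under AnyReq<?>
    }
}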

View File

@@ -56,7 +56,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
private static final int REQUEST_OVERHEAD = 50;
final List<ActionRequest> requests = new ArrayList<>();
final List<ActionRequest<?>> requests = new ArrayList<>();
List<Object> payloads = null;
protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@@ -72,21 +72,21 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* Creates a bulk request caused by some other request, which is provided as an
* argument so that its headers and context can be copied to the new request
*/
public BulkRequest(ActionRequest request) {
public BulkRequest(ActionRequest<?> request) {
super(request);
}
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
public BulkRequest add(ActionRequest... requests) {
for (ActionRequest request : requests) {
public BulkRequest add(ActionRequest<?>... requests) {
for (ActionRequest<?> request : requests) {
add(request, null);
}
return this;
}
public BulkRequest add(ActionRequest request) {
public BulkRequest add(ActionRequest<?> request) {
return add(request, null);
}
@@ -96,7 +96,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* @param payload Optional payload
* @return the current bulk request
*/
public BulkRequest add(ActionRequest request, @Nullable Object payload) {
public BulkRequest add(ActionRequest<?> request, @Nullable Object payload) {
if (request instanceof IndexRequest) {
add((IndexRequest) request, payload);
} else if (request instanceof DeleteRequest) {
@@ -112,8 +112,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
public BulkRequest add(Iterable<ActionRequest> requests) {
for (ActionRequest request : requests) {
public BulkRequest add(Iterable<ActionRequest<?>> requests) {
for (ActionRequest<?> request : requests) {
add(request);
}
return this;
@@ -196,15 +196,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
/**
* The list of requests in this bulk request.
*/
public List<ActionRequest> requests() {
public List<ActionRequest<?>> requests() {
return this.requests;
}
@Override
@SuppressWarnings("unchecked")
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> indicesRequests = new ArrayList<>();
for (ActionRequest request : requests) {
for (ActionRequest<?> request : requests) {
assert request instanceof IndicesRequest;
indicesRequests.add((IndicesRequest) request);
}
@@ -486,7 +485,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
if (requests.isEmpty()) {
validationException = addValidationError("no requests added", validationException);
}
for (ActionRequest request : requests) {
for (ActionRequest<?> request : requests) {
// We first check if refresh has been set
if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
(request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
@@ -535,7 +534,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
super.writeTo(out);
out.writeByte(consistencyLevel.id());
out.writeVInt(requests.size());
for (ActionRequest request : requests) {
for (ActionRequest<?> request : requests) {
if (request instanceof IndexRequest) {
out.writeByte((byte) 0);
} else if (request instanceof DeleteRequest) {

View File

@@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
@@ -109,7 +108,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
}
public static class Request extends SingleShardRequest implements IndicesRequest {
public static class Request extends SingleShardRequest<Request> implements IndicesRequest {
private int shardId;
private String preference;
@@ -237,7 +236,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
shardResponse.readFrom(in);
items.add(new Item(slot, shardResponse));
} else {
items.add(new Item(slot, (Throwable)in.readThrowable()));
items.add(new Item(slot, in.readThrowable()));
}
}
}

View File

@@ -40,13 +40,13 @@ public interface ActionFilter {
* Enables filtering the execution of an action on the request side, either by sending a response through the
* {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
*/
void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain);
void apply(Task task, String action, ActionRequest<?> request, ActionListener<?> listener, ActionFilterChain chain);
/**
* Enables filtering the execution of an action on the response side, either by sending a response through the
* {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
*/
void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain);
void apply(String action, ActionResponse response, ActionListener<?> listener, ActionFilterChain chain);
/**
* A simple base class for injectable action filters that spares the implementation from handling the
@@ -60,7 +60,7 @@ public interface ActionFilter {
}
@Override
public final void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
public final void apply(Task task, String action, ActionRequest<?> request, ActionListener<?> listener, ActionFilterChain chain) {
if (apply(action, request, listener)) {
chain.proceed(task, action, request, listener);
}
@@ -70,10 +70,10 @@ public interface ActionFilter {
* Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
* if it should be aborted since the filter already handled the request and called the given listener.
*/
protected abstract boolean apply(String action, ActionRequest request, ActionListener listener);
protected abstract boolean apply(String action, ActionRequest<?> request, ActionListener<?> listener);
@Override
public final void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
public final void apply(String action, ActionResponse response, ActionListener<?> listener, ActionFilterChain chain) {
if (apply(action, response, listener)) {
chain.proceed(action, response, listener);
}
@@ -83,6 +83,6 @@ public interface ActionFilter {
* Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
* if it should be aborted since the filter already handled the response by calling the given listener.
*/
protected abstract boolean apply(String action, ActionResponse response, ActionListener listener);
protected abstract boolean apply(String action, ActionResponse response, ActionListener<?> listener);
}
}
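
The contract above is: a filter either answers the request itself through the listener, or calls chain.proceed to keep going; the Simple base class reduces that choice to a boolean. A self-contained sketch of that control flow (types collapsed to Object so it compiles on its own; a real filter would implement the signatures shown above):

// Functional stand-in for ActionFilterChain.
interface ChainSketch {
    void proceed(String action, Object request, Object listener);
}

class LoggingFilterSketch {
    // Mirrors ActionFilter.Simple: return true to let the chain continue.
    boolean apply(String action, Object request, Object listener) {
        System.out.println("dispatching action " + action);
        return true; // we did not handle the request ourselves
    }

    void filter(String action, Object request, Object listener, ChainSketch chain) {
        if (apply(action, request, listener)) {
            chain.proceed(action, request, listener);
        }
    }

    public static void main(String[] args) {
        new LoggingFilterSketch().filter("indices:data/read/search", new Object(), new Object(),
                (action, request, listener) -> System.out.println("proceeding with " + action));
    }
}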

View File

@@ -34,8 +34,8 @@ import java.util.function.Supplier;
/**
* A TransportAction that self registers a handler into the transport service
*/
public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse> extends TransportAction<Request,Response>{
public abstract class HandledTransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse>
extends TransportAction<Request, Response> {
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler());

View File

@@ -40,7 +40,7 @@ import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
/**
*
*/
public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
public abstract class TransportAction<Request extends ActionRequest<Request>, Response extends ActionResponse> extends AbstractComponent {
protected final ThreadPool threadPool;
protected final String actionName;
@@ -66,7 +66,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
return future;
}
public final void execute(Request request, ActionListener<Response> listener) {
public final Task execute(Request request, ActionListener<Response> listener) {
Task task = taskManager.register("transport", actionName, request);
if (task == null) {
execute(null, request, listener);
@@ -85,6 +85,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
}
});
}
return task;
}
private final void execute(Task task, Request request, ActionListener<Response> listener) {
@@ -114,7 +115,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
protected abstract void doExecute(Request request, ActionListener<Response> listener);
private static class RequestFilterChain<Request extends ActionRequest, Response extends ActionResponse> implements ActionFilterChain {
private static class RequestFilterChain<Request extends ActionRequest<Request>, Response extends ActionResponse> implements ActionFilterChain {
private final TransportAction<Request, Response> action;
private final AtomicInteger index = new AtomicInteger();

View File

@@ -49,7 +49,7 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportBroadcastAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
public abstract class TransportBroadcastAction<Request extends BroadcastRequest<Request>, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
extends HandledTransportAction<Request, Response> {
protected final ClusterService clusterService;

View File

@@ -74,7 +74,7 @@ import java.util.function.Supplier;
* @param <Response> the response to the client request
* @param <ShardOperationResult> per-shard operation results
*/
public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest,
public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest<Request>,
Response extends BroadcastResponse,
ShardOperationResult extends Streamable> extends HandledTransportAction<Request, Response> {
@@ -447,10 +447,12 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
return nodeId;
}
@Override
public String[] indices() {
return indicesLevelRequest.indices();
}
@Override
public IndicesOptions indicesOptions() {
return indicesLevelRequest.indicesOptions();
}

View File

@@ -50,7 +50,7 @@ import java.util.function.Supplier;
/**
* A base class for operations that need to be performed on the master node.
*/
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;

View File

@@ -33,7 +33,8 @@ import java.util.function.Supplier;
* A base class for read operations that need to be performed on the master node.
* Can also be executed on the local node if needed.
*/
public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest, Response extends ActionResponse> extends TransportMasterNodeAction<Request, Response> {
public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeAction<Request, Response> {
public static final String FORCE_LOCAL_SETTING = "action.master.force_local";

View File

@@ -33,7 +33,8 @@ import java.util.function.Supplier;
/**
*/
public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest, Response extends ActionResponse> extends TransportMasterNodeReadAction<Request, Response> {
public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeReadAction<Request, Response> {
public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,

View File

@@ -50,7 +50,7 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {
public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction<NodesRequest, NodesResponse> {
protected final ClusterName clusterName;
protected final ClusterService clusterService;

View File

@@ -0,0 +1,59 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.replication;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.index.shard.ShardId;
/**
* A replication request that has no more information than ReplicationRequest.
* Unfortunately ReplicationRequest can't be declared as a type parameter
* because it has a self referential type parameter of its own. So use this
* instead.
*/
public class BasicReplicationRequest extends ReplicationRequest<BasicReplicationRequest> {
public BasicReplicationRequest() {
}
/**
* Creates a new request that inherits headers and context from the request
* provided as argument.
*/
public BasicReplicationRequest(ActionRequest<?> request) {
super(request);
}
/**
* Creates a new request with resolved shard id
*/
public BasicReplicationRequest(ActionRequest<?> request, ShardId shardId) {
super(request, shardId);
}
/**
* Copy constructor that creates a new request that is a copy of the one
* provided as an argument.
*/
protected BasicReplicationRequest(BasicReplicationRequest request) {
super(request);
}
}
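
The javadoc above describes a real limitation of F-bounded generics: ReplicationRequest can never be written out as a concrete type argument, because the parameter refers to itself and the nesting never terminates. A trivial subclass "ties the knot" by naming itself. In miniature:

// The self-referential parameter never bottoms out on its own ...
abstract class RepReq<R extends RepReq<R>> {}

// ... but a concrete subclass closes the recursion by naming itself,
// which is exactly the role BasicReplicationRequest plays above.
class BasicRepReq extends RepReq<BasicRepReq> {}

class KnotDemo {
    public static void main(String[] args) {
        // Now the hierarchy works as an ordinary type argument.
        java.util.List<BasicRepReq> shardRequests = new java.util.ArrayList<>();
        shardRequests.add(new BasicRepReq());
    }
}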

View File

@@ -38,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
*
*/
public class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {
public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> extends ActionRequest<Request> implements IndicesRequest {
public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.action.support.replication;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ReplicationResponse;
@@ -52,7 +53,8 @@ import java.util.function.Supplier;
* Base class for requests that should be executed on all shards of an index or several indices.
* This action sends shard requests to all primary shards of the indices and they are then replicated like write requests
*/
public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest, ShardResponse extends ReplicationResponse> extends HandledTransportAction<Request, Response> {
public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest<Request>, Response extends BroadcastResponse, ShardRequest extends ReplicationRequest<ShardRequest>, ShardResponse extends ReplicationResponse>
extends HandledTransportAction<Request, Response> {
private final TransportReplicationAction replicatedBroadcastShardAction;
private final ClusterService clusterService;

View File

@@ -90,7 +90,7 @@ import java.util.function.Supplier;
* primary node to validate request before primary operation followed by sampling state again for resolving
* nodes with replica copies to perform replication.
*/
public abstract class TransportReplicationAction<Request extends ReplicationRequest, ReplicaRequest extends ReplicationRequest, Response extends ReplicationResponse> extends TransportAction<Request, Response> {
public abstract class TransportReplicationAction<Request extends ReplicationRequest<Request>, ReplicaRequest extends ReplicationRequest<ReplicaRequest>, Response extends ReplicationResponse> extends TransportAction<Request, Response> {
protected final TransportService transportService;
protected final ClusterService clusterService;

View File

@@ -54,8 +54,8 @@ import java.util.function.Supplier;
/**
*
*/
public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest<Request>, Response extends ActionResponse>
extends HandledTransportAction<Request, Response> {
protected final ClusterService clusterService;
protected final TransportService transportService;

View File

@@ -54,7 +54,7 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvaila
* the read operation can be performed on other shard copies. Concrete implementations can provide their own list
* of candidate shards to try the read operation on.
*/
public abstract class TransportSingleShardAction<Request extends SingleShardRequest, Response extends ActionResponse> extends TransportAction<Request, Response> {
public abstract class TransportSingleShardAction<Request extends SingleShardRequest<Request>, Response extends ActionResponse> extends TransportAction<Request, Response> {
protected final ClusterService clusterService;

View File

@@ -40,7 +40,8 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder type.
* @return A future allowing to get back the response.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(final Action<Request, Response, RequestBuilder> action, final Request request);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
final Action<Request, Response, RequestBuilder> action, final Request request);
/**
* Executes a generic action, denoted by an {@link Action}.
@@ -52,7 +53,8 @@ public interface ElasticsearchClient {
* @param <Response> The response type.
* @param <RequestBuilder> The request builder type.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
/**
* Prepares a request builder to execute, specified by {@link Action}.
@@ -63,7 +65,8 @@ public interface ElasticsearchClient {
* @param <RequestBuilder> The request builder.
* @return The request builder, that can, at a later stage, execute the request.
*/
<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action);
<Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action);
/**
* Returns the threadpool used to execute requests on this client

View File

@@ -52,7 +52,8 @@ public abstract class FilterClient extends AbstractClient {
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
in().execute(action, request, listener);
}

View File

@@ -56,7 +56,8 @@ public class NodeClient extends AbstractClient {
@SuppressWarnings("unchecked")
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
TransportAction<Request, Response> transportAction = actions.get(action);
if (transportAction == null) {
throw new IllegalStateException("failed to find action [" + action + "] to execute");

View File

@@ -363,12 +363,14 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(final Action<Request, Response, RequestBuilder> action) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
final Action<Request, Response, RequestBuilder> action) {
return action.newRequestBuilder(this);
}
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
PlainActionFuture<Response> actionFuture = PlainActionFuture.newFuture();
execute(action, request, actionFuture);
return actionFuture;
@@ -378,13 +380,14 @@ public abstract class AbstractClient extends AbstractComponent implements Client
* This is the single execution point of *all* clients.
*/
@Override
public final <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public final <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
headers.applyTo(request);
listener = threadedWrapper.wrap(listener);
doExecute(action, request, listener);
}
protected abstract <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
protected abstract <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener);
@Override
public ActionFuture<IndexResponse> index(final IndexRequest request) {
@@ -821,17 +824,20 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(Action<Request, Response, RequestBuilder> action) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action);
}
@@ -1178,17 +1184,20 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(Action<Request, Response, RequestBuilder> action, Request request) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> ActionFuture<Response> execute(
Action<Request, Response, RequestBuilder> action, Request request) {
return client.execute(action, request);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
client.execute(action, request, listener);
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(Action<Request, Response, RequestBuilder> action) {
public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> RequestBuilder prepareExecute(
Action<Request, Response, RequestBuilder> action) {
return client.prepareExecute(action);
}

View File

@@ -19,6 +19,10 @@
package org.elasticsearch.client.transport;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
@@ -36,6 +40,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
@@ -54,10 +59,6 @@ import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
@@ -128,6 +129,7 @@ public class TransportClient extends AbstractClient {
final ThreadPool threadPool = new ThreadPool(settings);
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
ModulesBuilder modules = new ModulesBuilder();
@@ -138,10 +140,10 @@ public class TransportClient extends AbstractClient {
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(this.settings, settingsFilter ));
modules.add(new NetworkModule(networkService, this.settings, true));
modules.add(new NetworkModule(networkService, this.settings, true, namedWriteableRegistry));
modules.add(new ClusterNameModule(this.settings));
modules.add(new ThreadPoolModule(threadPool));
modules.add(new SearchModule() {
modules.add(new SearchModule(settings, namedWriteableRegistry) {
@Override
protected void configure() {
// noop
@@ -276,7 +278,7 @@ public class TransportClient extends AbstractClient {
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
proxy.execute(action, request, listener);
}
}

View File

@@ -317,7 +317,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// first, add the default mapping
if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
try {
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), false, request.updateAllTypes());
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing default mapping on index creation";
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, MapperService.DEFAULT_MAPPING, e.getMessage());
@@ -329,7 +329,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
try {
// apply the default here, its the first time we parse it
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), true, request.updateAllTypes());
mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
} catch (Exception e) {
removalReason = "failed on parsing mappings on index creation";
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());

View File

@@ -104,12 +104,9 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
// temporarily create the index and add mappings so we can parse the filter
try {
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
}
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
} catch (Exception e) {
logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());

View File

@@ -125,7 +125,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, () -> null)) {
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), MapperService.MergeReason.MAPPING_RECOVERY, false);
}
}
}

View File

@@ -143,7 +143,7 @@ public class MetaDataMappingService extends AbstractComponent {
removeIndex = true;
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
}
@@ -223,7 +223,7 @@ public class MetaDataMappingService extends AbstractComponent {
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
}
}
@@ -303,7 +303,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (existingMapper != null) {
existingSource = existingMapper.mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
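
Throughout these hunks the boolean third argument to MapperService.merge is replaced by an explicit MergeReason, separating user-driven updates from recovery. A sketch of the shape of that refactor; the two constants appear in this commit, but the exact validation split shown here is an assumption for illustration:

// Assumed semantics: updates are validated strictly, recovery re-applies
// mappings that were already accepted once.
enum MergeReasonSketch {
    MAPPING_UPDATE,
    MAPPING_RECOVERY
}

class MapperServiceSketch {
    void merge(String type, String mappingSource, MergeReasonSketch reason, boolean updateAllTypes) {
        if (reason == MergeReasonSketch.MAPPING_UPDATE) {
            validate(mappingSource); // strict checks only for new, user-driven changes
        }
        apply(type, mappingSource);
    }

    private void validate(String source) { /* ... */ }
    private void apply(String type, String source) { /* ... */ }
}

An enum also reads better at call sites than a bare false, and it leaves room for more reasons than a boolean could encode.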

View File

@@ -36,7 +36,7 @@ public class CircleBuilder extends ShapeBuilder {
public static final String FIELD_RADIUS = "radius";
public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;
static final CircleBuilder PROTOTYPE = new CircleBuilder();
public static final CircleBuilder PROTOTYPE = new CircleBuilder();
private DistanceUnit unit = DistanceUnit.DEFAULT;
private double radius;

View File

@@ -33,7 +33,7 @@ public class EnvelopeBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));
public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));
private Coordinate topLeft;
private Coordinate bottomRight;

View File

@@ -36,7 +36,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION;
static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();

View File

@@ -57,7 +57,7 @@ public class LineStringBuilder extends CoordinateCollection<LineStringBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));
public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

View File

@@ -37,7 +37,7 @@ public class MultiLineStringBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;
static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
private final ArrayList<LineStringBuilder> lines = new ArrayList<>();

View File

@@ -37,7 +37,7 @@ public class MultiPointBuilder extends CoordinateCollection<MultiPointBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;
final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
public static final MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
/**
* Create a new {@link MultiPointBuilder}.

View File

@@ -36,7 +36,7 @@ import java.util.Objects;
public class MultiPolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
private final ArrayList<PolygonBuilder> polygons = new ArrayList<>();

View File

@@ -32,7 +32,7 @@ import java.util.Objects;
public class PointBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POINT;
static final PointBuilder PROTOTYPE = new PointBuilder();
public static final PointBuilder PROTOTYPE = new PointBuilder();
private Coordinate coordinate;

View File

@@ -53,7 +53,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class PolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
.coordinate(1.0, 0.0).coordinate(0.0, 0.0));
private static final Coordinate[][] EMPTY = new Coordinate[0][];

View File

@@ -1,45 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
/**
* Register the shape builder prototypes with the {@link NamedWriteableRegistry}
*/
public class ShapeBuilderRegistry {
@Inject
public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
}
}
}
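
This registry class is deleted in the same commit that makes the shape builder PROTOTYPE constants public (the hunks above), so registration presumably moves into module setup code that already holds the shared NamedWriteableRegistry. The underlying idiom is the prototype pattern: one registered instance per concrete type acts purely as a factory when reading that type back off the wire. A reduced sketch, where a String wire form stands in for StreamInput:

import java.util.HashMap;
import java.util.Map;

interface NamedWriteableSketch<T> {
    String getWriteableName();
    T readFrom(String wireForm); // stand-in for readFrom(StreamInput)
}

class PrototypeRegistrySketch {
    private final Map<String, NamedWriteableSketch<?>> prototypes = new HashMap<>();

    void registerPrototype(NamedWriteableSketch<?> prototype) {
        prototypes.put(prototype.getWriteableName(), prototype);
    }

    Object read(String name, String wireForm) {
        return prototypes.get(name).readFrom(wireForm); // prototype acts as a factory
    }
}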

View File

@@ -49,8 +49,8 @@ public abstract class AbstractMatcher<T> implements Matcher<T> {
@Override
public boolean equals(Object other) {
return other instanceof AndMatcher
&& ((AndMatcher) other).a.equals(a)
&& ((AndMatcher) other).b.equals(b);
&& ((AndMatcher<?>) other).a.equals(a)
&& ((AndMatcher<?>) other).b.equals(b);
}
@Override
@@ -80,8 +80,8 @@ public abstract class AbstractMatcher<T> implements Matcher<T> {
@Override
public boolean equals(Object other) {
return other instanceof OrMatcher
&& ((OrMatcher) other).a.equals(a)
&& ((OrMatcher) other).b.equals(b);
&& ((OrMatcher<?>) other).a.equals(a)
&& ((OrMatcher<?>) other).b.equals(b);
}
@Override

View File

@@ -20,9 +20,11 @@
package org.elasticsearch.common.lucene;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -72,7 +74,7 @@ public final class ShardCoreKeyMap {
}
final boolean added = objects.add(coreKey);
assert added;
reader.addCoreClosedListener(ownerCoreCacheKey -> {
CoreClosedListener listener = ownerCoreCacheKey -> {
assert coreKey == ownerCoreCacheKey;
synchronized (ShardCoreKeyMap.this) {
coreKeyToShard.remove(ownerCoreCacheKey);
@@ -83,7 +85,20 @@ public final class ShardCoreKeyMap {
indexToCoreKey.remove(index);
}
}
});
};
boolean addedListener = false;
try {
reader.addCoreClosedListener(listener);
addedListener = true;
} finally {
if (false == addedListener) {
try {
listener.onClose(coreKey);
} catch (IOException e) {
throw new RuntimeException("Blow up trying to recover from failure to add listener", e);
}
}
}
}
}
}
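
The rewrite above stores the listener in a local variable so that, if addCoreClosedListener throws (for instance on an already-closed reader), the listener can be invoked by hand and the map entries added just before are cleaned up instead of leaking. The generic shape of that register-or-roll-back idiom:

// If registering a cleanup callback fails, run the callback immediately so
// the bookkeeping done beforehand does not leak.
class RegisterOrRollback {
    interface Cleanup { void run() throws Exception; }

    static void addWithRollback(Runnable register, Cleanup rollback) {
        boolean registered = false;
        try {
            register.run();
            registered = true;
        } finally {
            if (registered == false) {
                try {
                    rollback.run(); // undo our own state by hand
                } catch (Exception e) {
                    throw new RuntimeException("failed to roll back after registration failure", e);
                }
            }
        }
    }
}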

View File

@@ -19,6 +19,9 @@
package org.elasticsearch.common.network;
import java.util.Arrays;
import java.util.List;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;
@@ -135,9 +138,6 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;
import java.util.Arrays;
import java.util.List;
/**
* A module to handle registering and binding all network related classes.
*/
@@ -291,6 +291,7 @@ public class NetworkModule extends AbstractModule {
private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class);
// we must separate the cat rest handlers so RestCatAction can collect them...
private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class);
private final NamedWriteableRegistry namedWriteableRegistry;
/**
* Creates a network module that custom networking classes can be plugged into.
@@ -298,11 +299,13 @@ public class NetworkModule extends AbstractModule {
* @param networkService A constructed network service object to bind.
* @param settings The settings for the node
* @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
* @param namedWriteableRegistry registry for named writeables for use during streaming
*/
public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient, NamedWriteableRegistry namedWriteableRegistry) {
this.networkService = networkService;
this.settings = settings;
this.transportClient = transportClient;
this.namedWriteableRegistry = namedWriteableRegistry;
registerTransportService(NETTY_TRANSPORT, TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
@@ -354,7 +357,7 @@ public class NetworkModule extends AbstractModule {
@Override
protected void configure() {
bind(NetworkService.class).toInstance(networkService);
bind(NamedWriteableRegistry.class).asEagerSingleton();
bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
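The switch from asEagerSingleton() to toInstance() matters because the registry is now created outside the injector and shared with other components; binding the pre-built instance guarantees every consumer sees the same registry. A hedged Guice sketch of the difference (MyRegistry is a stand-in, not an Elasticsearch class, and plain com.google.inject is assumed here):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

class MyRegistry {}  // stand-in for a registry built before the injector exists

class RegistryModule extends AbstractModule {
    private final MyRegistry registry;

    RegistryModule(MyRegistry registry) {
        this.registry = registry;
    }

    @Override
    protected void configure() {
        // binds the exact instance handed in, instead of letting Guice construct one
        bind(MyRegistry.class).toInstance(registry);
    }
}

class RegistryDemo {
    public static void main(String[] args) {
        MyRegistry shared = new MyRegistry();
        Injector injector = Guice.createInjector(new RegistryModule(shared));
        // same object, so registrations done outside the injector are visible inside it
        assert injector.getInstance(MyRegistry.class) == shared;
    }
}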

View File

@ -40,7 +40,7 @@ import java.util.function.Consumer;
*/
public abstract class AbstractScopedSettings extends AbstractComponent {
private Settings lastSettingsApplied = Settings.EMPTY;
private final List<SettingUpdater> settingUpdaters = new ArrayList<>();
private final List<SettingUpdater<?>> settingUpdaters = new ArrayList<>();
private final Map<String, Setting<?>> complexMatchers = new HashMap<>();
private final Map<String, Setting<?>> keySettings = new HashMap<>();
private final Setting.Scope scope;
@ -86,7 +86,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
final Settings current = Settings.builder().put(this.settings).put(settings).build();
final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
List<RuntimeException> exceptions = new ArrayList<>();
for (SettingUpdater settingUpdater : settingUpdaters) {
for (SettingUpdater<?> settingUpdater : settingUpdaters) {
try {
if (settingUpdater.hasChanged(current, previous)) {
settingUpdater.getValue(current, previous);
@ -117,7 +117,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
try {
List<Runnable> applyRunnables = new ArrayList<>();
for (SettingUpdater settingUpdater : settingUpdaters) {
for (SettingUpdater<?> settingUpdater : settingUpdaters) {
try {
applyRunnables.add(settingUpdater.updater(current, previous));
} catch (Exception ex) {
@ -210,6 +210,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
/**
* Transactional interface to update settings.
* @see Setting
* @param <T> the type of the value of the setting
*/
public interface SettingUpdater<T> {
@ -262,17 +263,16 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
/**
* Returns the {@link Setting} for the given key or <code>null</code> if the setting cannot be found.
*/
public Setting get(String key) {
public Setting<?> get(String key) {
Setting<?> setting = keySettings.get(key);
if (setting == null) {
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
return entry.getValue();
}
}
} else {
if (setting != null) {
return setting;
}
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
return entry.getValue();
}
}
return null;
}
@ -280,7 +280,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
* Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
*/
public boolean hasDynamicSetting(String key) {
final Setting setting = get(key);
final Setting<?> setting = get(key);
return setting != null && setting.isDynamic();
}
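The SettingUpdater interface documented above is applied in two phases: every updater first validates and captures the new value, and only then are the resulting runnables executed, so a bad value in one setting aborts the whole batch before anything is applied. A minimal sketch of that transactional shape, with made-up names and string-keyed settings:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

class TwoPhaseSettingsSketch {
    interface Updater {
        boolean hasChanged(Map<String, String> current, Map<String, String> previous);
        Runnable prepare(Map<String, String> current);  // validate + capture now, apply later
    }

    static Updater intUpdater(String key, Consumer<Integer> consumer) {
        return new Updater() {
            @Override
            public boolean hasChanged(Map<String, String> current, Map<String, String> previous) {
                return current.get(key) != null && current.get(key).equals(previous.get(key)) == false;
            }
            @Override
            public Runnable prepare(Map<String, String> current) {
                int value = Integer.parseInt(current.get(key)); // throws before anything is applied
                return () -> consumer.accept(value);
            }
        };
    }

    static void apply(List<Updater> updaters, Map<String, String> current, Map<String, String> previous) {
        List<Runnable> applies = new ArrayList<>();
        for (Updater u : updaters) {
            if (u.hasChanged(current, previous)) {
                applies.add(u.prepare(current)); // phase 1: validate everything first
            }
        }
        applies.forEach(Runnable::run);          // phase 2: apply only if all validated
    }
}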

View File

@ -43,6 +43,8 @@ import java.util.function.Function;
import java.util.regex.Pattern;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some settings (dynamic=true) can be modified at run time using the API.
*/
public class Setting<T> extends ToXContentToBytes {
private final String key;
@ -184,11 +186,18 @@ public class Setting<T> extends ToXContentToBytes {
INDEX;
}
final AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger) {
/**
* Build a new updater with a noop validator.
*/
final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger) {
return newUpdater(consumer, logger, (s) -> {});
}
AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
/**
* Build the updater responsible for validating new values, logging the new
* value, and eventually setting the value where it belongs.
*/
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
if (isDynamic()) {
return new Updater(consumer, logger, validator);
} else {
@ -366,6 +375,7 @@ public class Setting<T> extends ToXContentToBytes {
return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
}
@Override
public boolean match(String toTest) {
return pattern.matcher(toTest).matches();
}

View File

@ -1065,7 +1065,7 @@ public abstract class Engine implements Closeable {
}
}
public static class CommitId implements Writeable {
public static class CommitId implements Writeable<CommitId> {
private final byte[] id;

View File

@ -231,7 +231,8 @@ public class InternalEngine extends Engine {
protected void recoverFromTranslog(EngineConfig engineConfig, Translog.TranslogGeneration translogGeneration) throws IOException {
int opsRecovered = 0;
final TranslogRecoveryPerformer handler = engineConfig.getTranslogRecoveryPerformer();
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
try {
Translog.Snapshot snapshot = translog.newSnapshot();
Translog.Operation operation;
while ((operation = snapshot.next()) != null) {
try {

View File

@ -79,10 +79,20 @@ public class DocumentMapper implements ToXContent {
this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1));
this.rootObjectMapper = builder.build(builderContext);
final String type = rootObjectMapper.name();
DocumentMapper existingMapper = mapperService.documentMapper(type);
for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : mapperService.mapperRegistry.getMetadataMapperParsers().entrySet()) {
final String name = entry.getKey();
final TypeParser parser = entry.getValue();
final MetadataFieldMapper metadataMapper = parser.getDefault(indexSettings, mapperService.fullName(name), builder.name());
final MetadataFieldMapper existingMetadataMapper = existingMapper == null
? null
: (MetadataFieldMapper) existingMapper.mappers().getMapper(name);
final MetadataFieldMapper metadataMapper;
if (existingMetadataMapper == null) {
final TypeParser parser = entry.getValue();
metadataMapper = parser.getDefault(indexSettings, mapperService.fullName(name), builder.name());
} else {
metadataMapper = existingMetadataMapper;
}
metadataMappers.put(metadataMapper.getClass(), metadataMapper);
}
}
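The loop above now prefers a metadata field mapper carried over from the existing mapper for the same type and only falls back to the parser's default when none exists, so configuration on an earlier mapping is not silently reset on update. A simplified sketch of that lookup-or-default shape (the generic types are placeholders, not the Elasticsearch classes):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class MetadataMapperReuseSketch {
    static <K, V> Map<K, V> resolve(Map<K, Supplier<V>> defaults, Map<K, V> existing) {
        Map<K, V> resolved = new HashMap<>();
        for (Map.Entry<K, Supplier<V>> entry : defaults.entrySet()) {
            V reused = existing.get(entry.getKey());
            // reuse the mapper from the existing mapping if present, else build the default
            resolved.put(entry.getKey(), reused != null ? reused : entry.getValue().get());
        }
        return resolved;
    }
}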

View File

@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper;
import com.carrotsearch.hppc.ObjectHashSet;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.index.IndexOptions;
@ -33,7 +34,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.Queries;
@ -60,7 +60,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -79,6 +78,22 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
*/
public class MapperService extends AbstractIndexComponent implements Closeable {
/**
* The reason why a mapping is being merged.
*/
public enum MergeReason {
/**
* Create or update a mapping.
*/
MAPPING_UPDATE,
/**
* Recovery of an existing mapping, for instance after a restart,
* because a shard was moved to a different node, or for administrative
* purposes.
*/
MAPPING_RECOVERY;
}
public static final String DEFAULT_MAPPING = "_default_";
public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX);
@ -204,7 +219,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
typeListeners.remove(listener);
}
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
if (DEFAULT_MAPPING.equals(type)) {
// verify we can parse it
// NOTE: never apply the default here
@ -222,9 +237,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
return mapper;
} else {
synchronized (this) {
// only apply the default mapping if we don't have the type yet
applyDefault &= mappers.containsKey(type) == false;
return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
final boolean applyDefault =
// the default was already applied if we are recovering
reason != MergeReason.MAPPING_RECOVERY
// only apply the default mapping if we don't have the type yet
&& mappers.containsKey(type) == false;
DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
return merge(mergeWith, updateAllTypes);
}
}
}
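With MergeReason in the signature, whether the default mapping is applied can be derived inside merge instead of at every call site: recovery never applies the default (it was already applied when the mapping was first created), and an update applies it only for a brand-new type. A compact sketch of just that decision, using stand-in types:

import java.util.Set;

class MergeReasonSketch {
    enum MergeReason { MAPPING_UPDATE, MAPPING_RECOVERY }

    static boolean applyDefault(MergeReason reason, Set<String> existingTypes, String type) {
        // the default was already applied if we are recovering, and on update it
        // only applies when the type is not known yet
        return reason != MergeReason.MAPPING_RECOVERY && existingTypes.contains(type) == false;
    }
}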

View File

@ -213,11 +213,24 @@ public class IpFieldMapper extends NumberFieldMapper {
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
if (value != null) {
long[] fromTo;
String term;
if (value instanceof BytesRef) {
fromTo = Cidrs.cidrMaskToMinMax(((BytesRef) value).utf8ToString());
term = ((BytesRef) value).utf8ToString();
} else {
fromTo = Cidrs.cidrMaskToMinMax(value.toString());
term = value.toString();
}
long[] fromTo;
// assume that the term is either a CIDR range or the
// term is a single IPv4 address; if either of these
// assumptions is wrong, the CIDR parsing will fail
// anyway, and that is okay
if (term.contains("/")) {
// treat the term as if it is in CIDR notation
fromTo = Cidrs.cidrMaskToMinMax(term);
} else {
// treat the term as if it is a single IPv4, and
// apply a CIDR mask equivalent to the host route
fromTo = Cidrs.cidrMaskToMinMax(term + "/32");
}
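In other words, a bare address is normalized to a host route before the existing CIDR parser runs. A standalone sketch of that normalization (Cidrs is Elasticsearch's parser; the helper below is purely illustrative):

class CidrTermSketch {
    /** Normalize a term to CIDR notation: "10.0.0.1" becomes "10.0.0.1/32". */
    static String toCidr(String term) {
        // a term containing '/' is assumed to already be a CIDR range; anything
        // else is treated as a single IPv4 address, i.e. a /32 host route
        return term.contains("/") ? term : term + "/32";
    }
}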
if (fromTo != null) {
return rangeQuery(fromTo[0] == 0 ? null : fromTo[0],

View File

@ -42,7 +42,7 @@ import java.util.Objects;
* Base class for all classes producing lucene queries.
* Supports conversion to BytesReference and creation of lucene Query objects.
*/
public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder> extends ToXContentToBytes implements QueryBuilder<QB> {
public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder<QB>> extends ToXContentToBytes implements QueryBuilder<QB> {
/** Default for boost to apply to resulting Lucene query. Defaults to 1.0*/
public static final float DEFAULT_BOOST = 1.0f;
@ -225,10 +225,10 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder> exte
* their {@link QueryBuilder#toQuery(QueryShardContext)} method are not added to the
* resulting collection.
*/
protected static Collection<Query> toQueries(Collection<QueryBuilder> queryBuilders, QueryShardContext context) throws QueryShardException,
protected static Collection<Query> toQueries(Collection<QueryBuilder<?>> queryBuilders, QueryShardContext context) throws QueryShardException,
IOException {
List<Query> queries = new ArrayList<>(queryBuilders.size());
for (QueryBuilder queryBuilder : queryBuilders) {
for (QueryBuilder<?> queryBuilder : queryBuilders) {
Query query = queryBuilder.toQuery(context);
if (query != null) {
queries.add(query);
@ -243,15 +243,15 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder> exte
return getWriteableName();
}
protected final void writeQueries(StreamOutput out, List<? extends QueryBuilder> queries) throws IOException {
protected final void writeQueries(StreamOutput out, List<? extends QueryBuilder<?>> queries) throws IOException {
out.writeVInt(queries.size());
for (QueryBuilder query : queries) {
for (QueryBuilder<?> query : queries) {
out.writeQuery(query);
}
}
protected final List<QueryBuilder> readQueries(StreamInput in) throws IOException {
List<QueryBuilder> queries = new ArrayList<>();
protected final List<QueryBuilder<?>> readQueries(StreamInput in) throws IOException {
List<QueryBuilder<?>> queries = new ArrayList<>();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
queries.add(in.readQuery());

View File

@ -49,13 +49,13 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
static final BoolQueryBuilder PROTOTYPE = new BoolQueryBuilder();
private final List<QueryBuilder> mustClauses = new ArrayList<>();
private final List<QueryBuilder<?>> mustClauses = new ArrayList<>();
private final List<QueryBuilder> mustNotClauses = new ArrayList<>();
private final List<QueryBuilder<?>> mustNotClauses = new ArrayList<>();
private final List<QueryBuilder> filterClauses = new ArrayList<>();
private final List<QueryBuilder<?>> filterClauses = new ArrayList<>();
private final List<QueryBuilder> shouldClauses = new ArrayList<>();
private final List<QueryBuilder<?>> shouldClauses = new ArrayList<>();
private boolean disableCoord = DISABLE_COORD_DEFAULT;
@ -67,7 +67,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
* Adds a query that <b>must</b> appear in the matching documents and will
* contribute to scoring. No <tt>null</tt> value allowed.
*/
public BoolQueryBuilder must(QueryBuilder queryBuilder) {
public BoolQueryBuilder must(QueryBuilder<?> queryBuilder) {
if (queryBuilder == null) {
throw new IllegalArgumentException("inner bool query clause cannot be null");
}
@ -78,7 +78,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
/**
* Gets the queries that <b>must</b> appear in the matching documents.
*/
public List<QueryBuilder> must() {
public List<QueryBuilder<?>> must() {
return this.mustClauses;
}
@ -86,7 +86,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
* Adds a query that <b>must</b> appear in the matching documents but will
* not contribute to scoring. No <tt>null</tt> value allowed.
*/
public BoolQueryBuilder filter(QueryBuilder queryBuilder) {
public BoolQueryBuilder filter(QueryBuilder<?> queryBuilder) {
if (queryBuilder == null) {
throw new IllegalArgumentException("inner bool query clause cannot be null");
}
@ -95,9 +95,9 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
}
/**
* Gets the queries that <b>must</b> appear in the matching documents but don't conntribute to scoring
* Gets the queries that <b>must</b> appear in the matching documents but don't contribute to scoring
*/
public List<QueryBuilder> filter() {
public List<QueryBuilder<?>> filter() {
return this.filterClauses;
}
@ -105,7 +105,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
* Adds a query that <b>must not</b> appear in the matching documents.
* No <tt>null</tt> value allowed.
*/
public BoolQueryBuilder mustNot(QueryBuilder queryBuilder) {
public BoolQueryBuilder mustNot(QueryBuilder<?> queryBuilder) {
if (queryBuilder == null) {
throw new IllegalArgumentException("inner bool query clause cannot be null");
}
@ -116,7 +116,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
/**
* Gets the queries that <b>must not</b> appear in the matching documents.
*/
public List<QueryBuilder> mustNot() {
public List<QueryBuilder<?>> mustNot() {
return this.mustNotClauses;
}
@ -127,7 +127,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
*
* @see #minimumNumberShouldMatch(int)
*/
public BoolQueryBuilder should(QueryBuilder queryBuilder) {
public BoolQueryBuilder should(QueryBuilder<?> queryBuilder) {
if (queryBuilder == null) {
throw new IllegalArgumentException("inner bool query clause cannot be null");
}
@ -141,7 +141,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
* @see #should(QueryBuilder)
* @see #minimumNumberShouldMatch(int)
*/
public List<QueryBuilder> should() {
public List<QueryBuilder<?>> should() {
return this.shouldClauses;
}
@ -244,12 +244,12 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
builder.endObject();
}
private static void doXArrayContent(String field, List<QueryBuilder> clauses, XContentBuilder builder, Params params) throws IOException {
private static void doXArrayContent(String field, List<QueryBuilder<?>> clauses, XContentBuilder builder, Params params) throws IOException {
if (clauses.isEmpty()) {
return;
}
builder.startArray(field);
for (QueryBuilder clause : clauses) {
for (QueryBuilder<?> clause : clauses) {
clause.toXContent(builder, params);
}
builder.endArray();
@ -282,8 +282,8 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
return adjustPureNegative ? fixNegativeQueryIfNeeded(query) : query;
}
private static void addBooleanClauses(QueryShardContext context, BooleanQuery.Builder booleanQueryBuilder, List<QueryBuilder> clauses, Occur occurs) throws IOException {
for (QueryBuilder query : clauses) {
private static void addBooleanClauses(QueryShardContext context, BooleanQuery.Builder booleanQueryBuilder, List<QueryBuilder<?>> clauses, Occur occurs) throws IOException {
for (QueryBuilder<?> query : clauses) {
Query luceneQuery = null;
switch (occurs) {
case MUST:
@ -321,7 +321,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
@Override
protected BoolQueryBuilder doReadFrom(StreamInput in) throws IOException {
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
List<QueryBuilder> queryBuilders = readQueries(in);
List<QueryBuilder<?>> queryBuilders = readQueries(in);
boolQueryBuilder.mustClauses.addAll(queryBuilders);
queryBuilders = readQueries(in);
boolQueryBuilder.mustNotClauses.addAll(queryBuilders);

View File

@ -19,17 +19,14 @@
package org.elasticsearch.index.query;
import org.apache.lucene.search.BooleanQuery;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentParser;
/**
* Parser for bool query
*/
@ -45,11 +42,6 @@ public class BoolQueryParser implements QueryParser<BoolQueryBuilder> {
public static final ParseField MINIMUM_NUMBER_SHOULD_MATCH = new ParseField("minimum_number_should_match");
public static final ParseField ADJUST_PURE_NEGATIVE = new ParseField("adjust_pure_negative");
@Inject
public BoolQueryParser(Settings settings) {
BooleanQuery.setMaxClauseCount(settings.getAsInt("index.query.bool.max_clause_count", settings.getAsInt("indices.query.bool.max_clause_count", BooleanQuery.getMaxClauseCount())));
}
@Override
public String[] names() {
return new String[]{BoolQueryBuilder.NAME};

View File

@ -40,7 +40,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
public static final String NAME = "dis_max";
private final ArrayList<QueryBuilder> queries = new ArrayList<>();
private final List<QueryBuilder<?>> queries = new ArrayList<>();
/** Default multiplication factor for breaking ties in document scores.*/
public static float DEFAULT_TIE_BREAKER = 0.0f;
@ -51,7 +51,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
/**
* Add a sub-query to this disjunction.
*/
public DisMaxQueryBuilder add(QueryBuilder queryBuilder) {
public DisMaxQueryBuilder add(QueryBuilder<?> queryBuilder) {
if (queryBuilder == null) {
throw new IllegalArgumentException("inner dismax query clause cannot be null");
}
@ -62,7 +62,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
/**
* @return an immutable list copy of the current sub-queries of this disjunction
*/
public List<QueryBuilder> innerQueries() {
public List<QueryBuilder<?>> innerQueries() {
return this.queries;
}
@ -90,7 +90,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
builder.startObject(NAME);
builder.field(DisMaxQueryParser.TIE_BREAKER_FIELD.getPreferredName(), tieBreaker);
builder.startArray(DisMaxQueryParser.QUERIES_FIELD.getPreferredName());
for (QueryBuilder queryBuilder : queries) {
for (QueryBuilder<?> queryBuilder : queries) {
queryBuilder.toXContent(builder, params);
}
builder.endArray();
@ -112,7 +112,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
@Override
protected DisMaxQueryBuilder doReadFrom(StreamInput in) throws IOException {
DisMaxQueryBuilder disMax = new DisMaxQueryBuilder();
List<QueryBuilder> queryBuilders = readQueries(in);
List<QueryBuilder<?>> queryBuilders = readQueries(in);
disMax.queries.addAll(queryBuilders);
disMax.tieBreaker = in.readFloat();
return disMax;

View File

@ -19,15 +19,14 @@
package org.elasticsearch.index.query;
import java.io.IOException;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.support.QueryInnerHits;
import java.io.IOException;
public class NestedQueryParser implements QueryParser<NestedQueryBuilder> {
private static final NestedQueryBuilder PROTOTYPE = new NestedQueryBuilder("", EmptyQueryBuilder.PROTOTYPE);
@ -38,7 +37,7 @@ public class NestedQueryParser implements QueryParser<NestedQueryBuilder> {
@Override
public String[] names() {
return new String[]{NestedQueryBuilder.NAME, Strings.toCamelCase(NestedQueryBuilder.NAME)};
return new String[]{NestedQueryBuilder.NAME};
}
@Override

View File

@ -25,7 +25,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
import java.io.IOException;
public interface QueryBuilder<QB extends QueryBuilder> extends NamedWriteable<QB>, ToXContent {
public interface QueryBuilder<QB extends QueryBuilder<QB>> extends NamedWriteable<QB>, ToXContent {
/**
* Converts this QueryBuilder to a lucene {@link Query}.

View File

@ -43,7 +43,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
/** Default for flag controlling whether matches are required to be in-order */
public static boolean DEFAULT_IN_ORDER = true;
private final List<SpanQueryBuilder> clauses = new ArrayList<>();
private final List<SpanQueryBuilder<?>> clauses = new ArrayList<>();
private final int slop;
@ -55,7 +55,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
* @param initialClause an initial span query clause
* @param slop controls the maximum number of intervening unmatched positions permitted
*/
public SpanNearQueryBuilder(SpanQueryBuilder initialClause, int slop) {
public SpanNearQueryBuilder(SpanQueryBuilder<?> initialClause, int slop) {
if (initialClause == null) {
throw new IllegalArgumentException("query must include at least one clause");
}
@ -70,7 +70,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
return this.slop;
}
public SpanNearQueryBuilder clause(SpanQueryBuilder clause) {
public SpanNearQueryBuilder clause(SpanQueryBuilder<?> clause) {
if (clause == null) {
throw new IllegalArgumentException("query clauses cannot be null");
}
@ -81,7 +81,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
/**
* @return the {@link SpanQueryBuilder} clauses that were set for this query
*/
public List<SpanQueryBuilder> clauses() {
public List<SpanQueryBuilder<?>> clauses() {
return this.clauses;
}
@ -106,7 +106,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.startArray(SpanNearQueryParser.CLAUSES_FIELD.getPreferredName());
for (SpanQueryBuilder clause : clauses) {
for (SpanQueryBuilder<?> clause : clauses) {
clause.toXContent(builder, params);
}
builder.endArray();
@ -129,10 +129,10 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
@Override
protected SpanNearQueryBuilder doReadFrom(StreamInput in) throws IOException {
List<QueryBuilder> clauses = readQueries(in);
SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder((SpanQueryBuilder)clauses.get(0), in.readVInt());
List<QueryBuilder<?>> clauses = readQueries(in);
SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder((SpanQueryBuilder<?>)clauses.get(0), in.readVInt());
for (int i = 1; i < clauses.size(); i++) {
queryBuilder.clauses.add((SpanQueryBuilder)clauses.get(i));
queryBuilder.clauses.add((SpanQueryBuilder<?>)clauses.get(i));
}
queryBuilder.inOrder = in.readBoolean();
return queryBuilder;

View File

@ -38,18 +38,18 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
public static final String NAME = "span_or";
private final List<SpanQueryBuilder> clauses = new ArrayList<>();
private final List<SpanQueryBuilder<?>> clauses = new ArrayList<>();
static final SpanOrQueryBuilder PROTOTYPE = new SpanOrQueryBuilder(SpanTermQueryBuilder.PROTOTYPE);
public SpanOrQueryBuilder(SpanQueryBuilder initialClause) {
public SpanOrQueryBuilder(SpanQueryBuilder<?> initialClause) {
if (initialClause == null) {
throw new IllegalArgumentException("query must include at least one clause");
}
clauses.add(initialClause);
}
public SpanOrQueryBuilder clause(SpanQueryBuilder clause) {
public SpanOrQueryBuilder clause(SpanQueryBuilder<?> clause) {
if (clause == null) {
throw new IllegalArgumentException("inner bool query clause cannot be null");
}
@ -60,7 +60,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
/**
* @return the {@link SpanQueryBuilder} clauses that were set for this query
*/
public List<SpanQueryBuilder> clauses() {
public List<SpanQueryBuilder<?>> clauses() {
return this.clauses;
}
@ -68,7 +68,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.startArray(SpanOrQueryParser.CLAUSES_FIELD.getPreferredName());
for (SpanQueryBuilder clause : clauses) {
for (SpanQueryBuilder<?> clause : clauses) {
clause.toXContent(builder, params);
}
builder.endArray();
@ -89,10 +89,10 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
@Override
protected SpanOrQueryBuilder doReadFrom(StreamInput in) throws IOException {
List<QueryBuilder> clauses = readQueries(in);
SpanOrQueryBuilder queryBuilder = new SpanOrQueryBuilder((SpanQueryBuilder)clauses.get(0));
List<QueryBuilder<?>> clauses = readQueries(in);
SpanOrQueryBuilder queryBuilder = new SpanOrQueryBuilder((SpanQueryBuilder<?>)clauses.get(0));
for (int i = 1; i < clauses.size(); i++) {
queryBuilder.clauses.add((SpanQueryBuilder)clauses.get(i));
queryBuilder.clauses.add((SpanQueryBuilder<?>)clauses.get(i));
}
return queryBuilder;

View File

@ -22,6 +22,6 @@ package org.elasticsearch.index.query;
/**
* Marker interface for a specific type of {@link QueryBuilder} that allows building span queries
*/
public interface SpanQueryBuilder<QB extends SpanQueryBuilder> extends QueryBuilder<QB> {
public interface SpanQueryBuilder<QB extends SpanQueryBuilder<QB>> extends QueryBuilder<QB> {
}
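The recurring change from raw QueryBuilder to QueryBuilder<?> (and the self-referential bound QB extends QueryBuilder<QB>) is the usual self-bounded generic idiom: it lets fluent setters on the abstract base return the concrete builder type without casts at the call site. A generic illustration, deliberately unrelated to the Elasticsearch classes:

abstract class AbstractBuilder<B extends AbstractBuilder<B>> {
    private float boost = 1.0f;

    @SuppressWarnings("unchecked")
    public B boost(float boost) {
        this.boost = boost;
        return (B) this;    // safe because B is bound to the concrete subclass
    }

    public float boost() {
        return boost;
    }
}

final class TermBuilder extends AbstractBuilder<TermBuilder> {
    private String term;

    public TermBuilder term(String term) {
        this.term = term;
        return this;
    }
}

class SelfBoundedDemo {
    public static void main(String[] args) {
        // boost(...) returns TermBuilder, not AbstractBuilder, so chaining keeps the subtype
        TermBuilder builder = new TermBuilder().term("foo").boost(2.0f);
        assert builder.boost() == 2.0f;
    }
}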

View File

@ -19,10 +19,13 @@
package org.elasticsearch.index.query.functionscore;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
@ -36,10 +39,6 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryParser;
import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* Parser for function_score query
*/
@ -54,7 +53,6 @@ public class FunctionScoreQueryParser implements QueryParser<FunctionScoreQueryB
private final ScoreFunctionParserMapper functionParserMapper;
@Inject
public FunctionScoreQueryParser(ScoreFunctionParserMapper functionParserMapper) {
this.functionParserMapper = functionParserMapper;
}

View File

@ -19,65 +19,30 @@
package org.elasticsearch.index.query.functionscore;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser;
import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.lin.LinearDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentLocation;
import static java.util.Collections.unmodifiableMap;
public class ScoreFunctionParserMapper {
protected Map<String, ScoreFunctionParser<?>> functionParsers;
@Inject
public ScoreFunctionParserMapper(Set<ScoreFunctionParser> parsers, NamedWriteableRegistry namedWriteableRegistry) {
Map<String, ScoreFunctionParser<?>> map = new HashMap<>();
// built-in parsers
addParser(new ScriptScoreFunctionParser(), map, namedWriteableRegistry);
addParser(new GaussDecayFunctionParser(), map, namedWriteableRegistry);
addParser(new LinearDecayFunctionParser(), map, namedWriteableRegistry);
addParser(new ExponentialDecayFunctionParser(), map, namedWriteableRegistry);
addParser(new RandomScoreFunctionParser(), map, namedWriteableRegistry);
addParser(new FieldValueFactorFunctionParser(), map, namedWriteableRegistry);
for (ScoreFunctionParser<?> scoreFunctionParser : parsers) {
addParser(scoreFunctionParser, map, namedWriteableRegistry);
}
this.functionParsers = Collections.unmodifiableMap(map);
//weight doesn't have its own parser, so every function supports it out of the box.
//Can be a single function too when not associated to any other function, which is why it needs to be registered manually here.
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, new WeightBuilder());
public ScoreFunctionParserMapper(Map<String, ScoreFunctionParser<?>> functionParsers) {
this.functionParsers = unmodifiableMap(functionParsers);
}
public ScoreFunctionParser get(XContentLocation contentLocation, String parserName) {
ScoreFunctionParser functionParser = get(parserName);
public ScoreFunctionParser<?> get(XContentLocation contentLocation, String parserName) {
ScoreFunctionParser<?> functionParser = get(parserName);
if (functionParser == null) {
throw new ParsingException(contentLocation, "No function with the name [" + parserName + "] is registered.");
}
return functionParser;
}
private ScoreFunctionParser get(String parserName) {
private ScoreFunctionParser<?> get(String parserName) {
return functionParsers.get(parserName);
}
private static void addParser(ScoreFunctionParser<? extends ScoreFunctionBuilder> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) {
for (String name : scoreFunctionParser.getNames()) {
map.put(name, scoreFunctionParser);
}
@SuppressWarnings("unchecked") NamedWriteable<? extends ScoreFunctionBuilder> sfb = scoreFunctionParser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb);
}
}

View File

@ -0,0 +1,136 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
/**
* A base class for all classes that allow reading ops from translog files
*/
public abstract class BaseTranslogReader implements Comparable<BaseTranslogReader> {
protected final long generation;
protected final FileChannel channel;
protected final Path path;
protected final long firstOperationOffset;
public BaseTranslogReader(long generation, FileChannel channel, Path path, long firstOperationOffset) {
assert Translog.parseIdFromFileName(path) == generation : "generation mismatch. Path: " + Translog.parseIdFromFileName(path) + " but generation: " + generation;
this.generation = generation;
this.path = path;
this.channel = channel;
this.firstOperationOffset = firstOperationOffset;
}
public long getGeneration() {
return this.generation;
}
public abstract long sizeInBytes();
public abstract int totalOperations();
public final long getFirstOperationOffset() {
return firstOperationOffset;
}
public Translog.Operation read(Translog.Location location) throws IOException {
assert location.generation == generation : "read location's translog generation [" + location.generation + "] is not [" + generation + "]";
ByteBuffer buffer = ByteBuffer.allocate(location.size);
try (BufferedChecksumStreamInput checksumStreamInput = checksummedStream(buffer, location.translogLocation, location.size, null)) {
return read(checksumStreamInput);
}
}
/** read the size of the op (i.e., number of bytes, including the op size) written at the given position */
protected final int readSize(ByteBuffer reusableBuffer, long position) {
// read op size from disk
assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + reusableBuffer.capacity() + "]";
try {
reusableBuffer.clear();
reusableBuffer.limit(4);
readBytes(reusableBuffer, position);
reusableBuffer.flip();
// Add an extra 4 to account for the operation size integer itself
final int size = reusableBuffer.getInt() + 4;
final long maxSize = sizeInBytes() - position;
if (size < 0 || size > maxSize) {
throw new TranslogCorruptedException("operation size is corrupted, must be [0.." + maxSize + "] but was: " + size);
}
return size;
} catch (IOException e) {
throw new ElasticsearchException("unexpected exception reading from translog snapshot of " + this.path, e);
}
}
public Translog.Snapshot newSnapshot() {
return new TranslogSnapshot(generation, channel, path, firstOperationOffset, sizeInBytes(), totalOperations());
}
/**
* reads the bytes of an operation at the given position and wraps them in a checksummed stream.
* The buffer length is equal to the number of bytes read.
*/
protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize, BufferedChecksumStreamInput reuse) throws IOException {
final ByteBuffer buffer;
if (reusableBuffer.capacity() >= opSize) {
buffer = reusableBuffer;
} else {
buffer = ByteBuffer.allocate(opSize);
}
buffer.clear();
buffer.limit(opSize);
readBytes(buffer, position);
buffer.flip();
return new BufferedChecksumStreamInput(new ByteBufferStreamInput(buffer), reuse);
}
protected Translog.Operation read(BufferedChecksumStreamInput inStream) throws IOException {
return Translog.readOperation(inStream);
}
/**
* reads bytes at position into the given buffer, filling it.
*/
protected abstract void readBytes(ByteBuffer buffer, long position) throws IOException;
@Override
public String toString() {
return "translog [" + generation + "][" + path + "]";
}
@Override
public int compareTo(BaseTranslogReader o) {
return Long.compare(getGeneration(), o.getGeneration());
}
public Path path() {
return path;
}
}
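readSize above exposes the translog's framing: each operation is stored as a 4-byte length header followed by the payload, the reader adds 4 so the returned size covers the header itself, and the result is bounds-checked against the remaining file length. A self-contained sketch of the same framing over a byte array (this mirrors only what the method shows, not the full translog format):

import java.nio.ByteBuffer;

class LengthPrefixedFramingSketch {
    /** Returns the total frame size (4-byte length header + payload) at the given offset. */
    static int frameSize(byte[] file, int position) {
        ByteBuffer buffer = ByteBuffer.wrap(file, position, 4);
        int size = buffer.getInt() + 4;               // add the header itself
        long remaining = file.length - position;
        if (size < 0 || size > remaining) {
            throw new IllegalStateException("corrupted frame size: " + size + ", remaining: " + remaining);
        }
        return size;
    }

    public static void main(String[] args) {
        byte[] payload = {1, 2, 3};
        ByteBuffer file = ByteBuffer.allocate(4 + payload.length);
        file.putInt(payload.length).put(payload);
        assert frameSize(file.array(), 0) == 7;       // 4-byte header + 3-byte payload
    }
}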

View File

@ -1,71 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
final class ChannelReference extends AbstractRefCounted {
private final Path file;
private final FileChannel channel;
protected final long generation;
private final Callback<ChannelReference> onClose;
ChannelReference(Path file, long generation, FileChannel channel, Callback<ChannelReference> onClose) throws IOException {
super(file.toString());
this.generation = generation;
this.file = file;
this.channel = channel;
this.onClose = onClose;
}
public long getGeneration() {
return generation;
}
public Path getPath() {
return this.file;
}
public FileChannel getChannel() {
return this.channel;
}
@Override
public String toString() {
return "channel: file [" + file + "], ref count [" + refCount() + "]";
}
@Override
protected void closeInternal() {
try {
IOUtils.closeWhileHandlingException(channel);
} finally {
if (onClose != null) {
onClose.handle(this);
}
}
}
}

View File

@ -1,58 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import java.io.IOException;
/**
* Version 0 of the translog format; there is no header in this file
*/
@Deprecated
public final class LegacyTranslogReader extends LegacyTranslogReaderBase {
/**
* Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point
* at the end of the last operation in this snapshot.
*/
LegacyTranslogReader(long generation, ChannelReference channelReference, long fileLength) {
super(generation, channelReference, 0, fileLength);
}
@Override
protected Translog.Operation read(BufferedChecksumStreamInput in) throws IOException {
// read the opsize before an operation.
// Note that this was written & read outside of the stream when this class was used, but it makes things more consistent
// to read this here
in.readInt();
Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte());
Translog.Operation operation = Translog.newOperationFromType(type);
operation.readFrom(in);
return operation;
}
@Override
protected ImmutableTranslogReader newReader(long generation, ChannelReference channelReference, long firstOperationOffset, long length, int totalOperations) {
assert totalOperations == -1 : "expected unknown but was: " + totalOperations;
assert firstOperationOffset == 0;
return new LegacyTranslogReader(generation, channelReference, length);
}
}

View File

@ -1,64 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* Version 1 of the translog format; there is a checkpoint and therefore no notion of op count
*/
@Deprecated
class LegacyTranslogReaderBase extends ImmutableTranslogReader {
/**
* Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point
* at the end of the last operation in this snapshot.
*
*/
LegacyTranslogReaderBase(long generation, ChannelReference channelReference, long firstOperationOffset, long fileLength) {
super(generation, channelReference, firstOperationOffset, fileLength, TranslogReader.UNKNOWN_OP_COUNT);
}
@Override
protected Translog.Snapshot newReaderSnapshot(final int totalOperations, ByteBuffer reusableBuffer) {
assert totalOperations == -1 : "legacy we had no idea how many ops: " + totalOperations;
return new ReaderSnapshot(totalOperations, reusableBuffer) {
@Override
public Translog.Operation next() throws IOException {
if (position >= sizeInBytes()) { // this is the legacy case....
return null;
}
try {
return readOperation();
} catch (TruncatedTranslogException ex) {
return null; // legacy case
}
}
};
}
@Override
protected ImmutableTranslogReader newReader(long generation, ChannelReference channelReference, long firstOperationOffset, long length, int totalOperations) {
assert totalOperations == -1 : "expected unknown but was: " + totalOperations;
return new LegacyTranslogReaderBase(generation, channelReference, firstOperationOffset, length);
}
}

View File

@ -19,12 +19,8 @@
package org.elasticsearch.index.translog;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.lease.Releasables;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.Arrays;
/**
* A snapshot composed of multiple snapshots
@ -32,8 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
final class MultiSnapshot implements Translog.Snapshot {
private final Translog.Snapshot[] translogs;
private AtomicBoolean closed = new AtomicBoolean(false);
private final int estimatedTotalOperations;
private final int totalOperations;
private int index;
/**
@ -41,30 +36,18 @@ final class MultiSnapshot implements Translog.Snapshot {
*/
MultiSnapshot(Translog.Snapshot[] translogs) {
this.translogs = translogs;
int ops = 0;
for (Translog.Snapshot translog : translogs) {
final int tops = translog.estimatedTotalOperations();
if (tops == TranslogReader.UNKNOWN_OP_COUNT) {
ops = TranslogReader.UNKNOWN_OP_COUNT;
break;
}
assert tops >= 0 : "tops must be positive but was: " + tops;
ops += tops;
}
estimatedTotalOperations = ops;
totalOperations = Arrays.stream(translogs).mapToInt(Translog.Snapshot::totalOperations).sum();
index = 0;
}
@Override
public int estimatedTotalOperations() {
return estimatedTotalOperations;
public int totalOperations() {
return totalOperations;
}
@Override
public Translog.Operation next() throws IOException {
ensureOpen();
for (; index < translogs.length; index++) {
final Translog.Snapshot current = translogs[index];
Translog.Operation op = current.next();
@ -74,17 +57,4 @@ final class MultiSnapshot implements Translog.Snapshot {
}
return null;
}
protected void ensureOpen() {
if (closed.get()) {
throw new AlreadyClosedException("snapshot already closed");
}
}
@Override
public void close() throws ElasticsearchException {
if (closed.compareAndSet(false, true)) {
Releasables.close(translogs);
}
}
}
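MultiSnapshot's next() walks an array of sub-snapshots in order, advancing the index whenever one is exhausted; since the sub-snapshots no longer own resources, the Closeable plumbing above could be removed. A minimal composite-iterator sketch of the same traversal, generic over the element type:

import java.util.Arrays;
import java.util.Iterator;

class CompositeSketch<T> {
    private final Iterator<T>[] parts;
    private int index = 0;

    @SafeVarargs
    CompositeSketch(Iterator<T>... parts) {
        this.parts = parts;
    }

    /** Returns the next element across all parts, or null when everything is exhausted. */
    T next() {
        for (; index < parts.length; index++) {
            if (parts[index].hasNext()) {
                return parts[index].next();
            }
        }
        return null;
    }

    public static void main(String[] args) {
        CompositeSketch<Integer> c = new CompositeSketch<>(
                Arrays.asList(1, 2).iterator(), Arrays.asList(3).iterator());
        assert c.next() == 1 && c.next() == 2 && c.next() == 3 && c.next() == null;
    }
}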

View File

@ -34,12 +34,9 @@ import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
@ -53,7 +50,6 @@ import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
@ -69,6 +65,8 @@ import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* A Translog is a per-index-shard component that records all non-committed index operations in a durable manner.
@ -112,29 +110,25 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
static final Pattern PARSE_STRICT_ID_PATTERN = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)(\\.tlog)$");
private final List<ImmutableTranslogReader> recoveredTranslogs;
// the list of translog readers is guaranteed to be in order of translog generation
private final List<TranslogReader> readers = new ArrayList<>();
private volatile ScheduledFuture<?> syncScheduler;
// this is a concurrent set and is not protected by any of the locks. The main reason
// is that is being accessed by two separate classes (additions & reading are done by FsTranslog, remove by FsView when closed)
// is that is being accessed by two separate classes (additions & reading are done by Translog, remove by View when closed)
private final Set<View> outstandingViews = ConcurrentCollections.newConcurrentSet();
private BigArrays bigArrays;
protected final ReleasableLock readLock;
protected final ReleasableLock writeLock;
private final Path location;
private TranslogWriter current;
private volatile ImmutableTranslogReader currentCommittingTranslog;
private volatile long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion.
private final static long NOT_SET_GENERATION = -1; // -1 is safe as it will not cause a translog deletion.
private volatile long currentCommittingGeneration = NOT_SET_GENERATION;
private volatile long lastCommittedTranslogFileGeneration = NOT_SET_GENERATION;
private final AtomicBoolean closed = new AtomicBoolean();
private final TranslogConfig config;
private final String translogUUID;
private Callback<View> onViewClose = new Callback<View>() {
@Override
public void handle(View view) {
logger.trace("closing view starting at translog [{}]", view.minTranslogGeneration());
boolean removed = outstandingViews.remove(view);
assert removed : "View was never set but was supposed to be removed";
}
};
/**
@ -176,11 +170,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
// if not we don't even try to clean it up and wait until we fail creating it
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
}
this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint);
if (recoveredTranslogs.isEmpty()) {
this.readers.addAll(recoverFromFiles(translogGeneration, checkpoint));
if (readers.isEmpty()) {
throw new IllegalStateException("at least one reader must be recovered");
}
boolean success = false;
@ -193,11 +187,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
// for instance if we have a lot of tlog and we can't create the writer we keep on holding
// on to all the uncommitted tlog files if we don't close
if (success == false) {
IOUtils.closeWhileHandlingException(recoveredTranslogs);
IOUtils.closeWhileHandlingException(readers);
}
}
} else {
this.recoveredTranslogs = Collections.emptyList();
IOUtils.rm(location);
logger.debug("wipe translog location - creating new translog");
Files.createDirectories(location);
@ -205,21 +198,22 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
Checkpoint checkpoint = new Checkpoint(0, 0, generation);
Checkpoint.write(location.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
current = createWriter(generation);
this.lastCommittedTranslogFileGeneration = -1; // playing safe
this.lastCommittedTranslogFileGeneration = NOT_SET_GENERATION;
}
// now that we know which files are there, create a new current one.
} catch (Throwable t) {
// close the opened translog files if we fail to create a new translog...
IOUtils.closeWhileHandlingException(currentCommittingTranslog, current);
IOUtils.closeWhileHandlingException(current);
IOUtils.closeWhileHandlingException(readers);
throw t;
}
}
/** recover all translog files found on disk */
private final ArrayList<ImmutableTranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
private final ArrayList<TranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
boolean success = false;
ArrayList<ImmutableTranslogReader> foundTranslogs = new ArrayList<>();
ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX); // a temp file to copy the checkpoint to - note it must be on the same FS, otherwise the atomic move won't work
boolean tempFileRenamed = false;
try (ReleasableLock lock = writeLock.acquire()) {
@ -230,7 +224,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (Files.exists(committedTranslogFile) == false) {
throw new IllegalStateException("translog file doesn't exist with generation: " + i + " lastCommitted: " + lastCommittedTranslogFileGeneration + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
}
final ImmutableTranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
foundTranslogs.add(reader);
logger.debug("recovered local translog from checkpoint {}", checkpoint);
}
@ -267,17 +261,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return foundTranslogs;
}
ImmutableTranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
final long generation;
try {
generation = parseIdFromFileName(path);
} catch (IllegalArgumentException ex) {
throw new TranslogException(shardId, "failed to parse generation from file name matching pattern " + path, ex);
}
TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
try {
final ChannelReference raf = new ChannelReference(path, generation, channel, new OnCloseRunnable());
ImmutableTranslogReader reader = ImmutableTranslogReader.open(raf, checkpoint, translogUUID);
assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
TranslogReader reader = TranslogReader.open(channel, path, checkpoint, translogUUID);
channel = null;
return reader;
} finally {
@ -315,12 +303,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try {
current.sync();
} finally {
try {
IOUtils.close(current, currentCommittingTranslog);
} finally {
IOUtils.close(recoveredTranslogs);
recoveredTranslogs.clear();
}
closeFilesIfNoPendingViews();
}
} finally {
FutureUtils.cancel(syncScheduler);
@ -349,41 +332,49 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
/**
* Returns the number of operations in the transaction files that aren't committed to Lucene.
* Note: may return -1 if unknown
*/
public int totalOperations() {
int ops = 0;
try (ReleasableLock lock = readLock.acquire()) {
ops += current.totalOperations();
if (currentCommittingTranslog != null) {
int tops = currentCommittingTranslog.totalOperations();
assert tops != TranslogReader.UNKNOWN_OP_COUNT;
assert tops >= 0;
ops += tops;
}
}
return ops;
return totalOperations(lastCommittedTranslogFileGeneration);
}
/**
* Returns the size in bytes of the translog files that aren't committed to lucene.
*/
public long sizeInBytes() {
long size = 0;
try (ReleasableLock lock = readLock.acquire()) {
size += current.sizeInBytes();
if (currentCommittingTranslog != null) {
size += currentCommittingTranslog.sizeInBytes();
}
return sizeInBytes(lastCommittedTranslogFileGeneration);
}
/**
* Returns the number of operations in the transaction files that aren't committed to Lucene.
*/
private int totalOperations(long minGeneration) {
try (ReleasableLock ignored = readLock.acquire()) {
ensureOpen();
return Stream.concat(readers.stream(), Stream.of(current))
.filter(r -> r.getGeneration() >= minGeneration)
.mapToInt(BaseTranslogReader::totalOperations)
.sum();
}
}
/**
* Returns the size in bytes of the translog files that aren't committed to lucene.
*/
private long sizeInBytes(long minGeneration) {
try (ReleasableLock ignored = readLock.acquire()) {
ensureOpen();
return Stream.concat(readers.stream(), Stream.of(current))
.filter(r -> r.getGeneration() >= minGeneration)
.mapToLong(BaseTranslogReader::sizeInBytes)
.sum();
}
return size;
}
TranslogWriter createWriter(long fileGeneration) throws IOException {
TranslogWriter newFile;
try {
newFile = TranslogWriter.create(shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), getChannelFactory(), config.getBufferSize());
newFile = TranslogWriter.create(shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), getChannelFactory(), config.getBufferSize());
} catch (IOException e) {
throw new TranslogException(shardId, "failed to create new translog file", e);
}
@ -398,12 +389,12 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
*/
public Translog.Operation read(Location location) {
try (ReleasableLock lock = readLock.acquire()) {
final TranslogReader reader;
final BaseTranslogReader reader;
final long currentGeneration = current.getGeneration();
if (currentGeneration == location.generation) {
reader = current;
} else if (currentCommittingTranslog != null && currentCommittingTranslog.getGeneration() == location.generation) {
reader = currentCommittingTranslog;
} else if (readers.isEmpty() == false && readers.get(readers.size() - 1).getGeneration() == location.generation) {
reader = readers.get(readers.size() - 1);
} else if (currentGeneration < location.generation) {
throw new IllegalStateException("location generation [" + location.generation + "] is greater than the current generation [" + currentGeneration + "]");
} else {
@ -467,33 +458,16 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* Snapshots are fixed in time and will not be updated with future operations.
*/
public Snapshot newSnapshot() {
ensureOpen();
try (ReleasableLock lock = readLock.acquire()) {
ArrayList<TranslogReader> toOpen = new ArrayList<>();
toOpen.addAll(recoveredTranslogs);
if (currentCommittingTranslog != null) {
toOpen.add(currentCommittingTranslog);
}
toOpen.add(current);
return createSnapshot(toOpen.toArray(new TranslogReader[toOpen.size()]));
}
return createSnapshot(Long.MIN_VALUE);
}
private static Snapshot createSnapshot(TranslogReader... translogs) {
Snapshot[] snapshots = new Snapshot[translogs.length];
boolean success = false;
try {
for (int i = 0; i < translogs.length; i++) {
snapshots[i] = translogs[i].newSnapshot();
}
Snapshot snapshot = new MultiSnapshot(snapshots);
success = true;
return snapshot;
} finally {
if (success == false) {
Releasables.close(snapshots);
}
private Snapshot createSnapshot(long minGeneration) {
try (ReleasableLock ignored = readLock.acquire()) {
ensureOpen();
Snapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current))
.filter(reader -> reader.getGeneration() >= minGeneration)
.map(BaseTranslogReader::newSnapshot).toArray(Snapshot[]::new);
return new MultiSnapshot(snapshots);
}
}
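As a hedged usage sketch (assumes an open `translog` instance; error handling elided), draining a snapshot now looks like this - note that Snapshot no longer extends Releasable in this change, so there is nothing to close:

Translog.Snapshot snapshot = translog.newSnapshot();
final int expectedOps = snapshot.totalOperations();
int seen = 0;
Translog.Operation op;
while ((op = snapshot.next()) != null) { // null signals the end of the snapshot
    seen++;
}
assert seen == expectedOps;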
@ -502,25 +476,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* while receiving future ones as well
*/
public Translog.View newView() {
// we need to acquire the read lock to make sure no new translog is created
// and will be missed by the view we're making
try (ReleasableLock lock = readLock.acquire()) {
ArrayList<TranslogReader> translogs = new ArrayList<>();
try {
if (currentCommittingTranslog != null) {
translogs.add(currentCommittingTranslog.clone());
}
translogs.add(current.newReaderFromWriter());
View view = new View(translogs, onViewClose);
// this is safe as we know that no new translog is being made at the moment
// (we hold a read lock) and the view will be notified of any future one
outstandingViews.add(view);
translogs.clear();
return view;
} finally {
// close if anything happened and we didn't reach the clear
IOUtils.closeWhileHandlingException(translogs);
}
ensureOpen();
View view = new View(lastCommittedTranslogFileGeneration);
outstandingViews.add(view);
return view;
}
}
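Since a View now only records its minimum generation and its close() drives cleanup, a try-with-resources usage sketch (assuming a `translog` reference) looks like:

// a minimal sketch: a view pins every generation >= its minGeneration until closed
try (Translog.View view = translog.newView()) {
    Translog.Snapshot snapshot = view.snapshot();
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
        // replay op, e.g. to a recovering shard
    }
} // close() unregisters the view, trims unreferenced readers and may delete files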
@ -561,7 +521,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
*/
public boolean ensureSynced(Location location) throws IOException {
try (ReleasableLock lock = readLock.acquire()) {
if (location.generation == current.generation) { // if we have a new one it's already synced
if (location.generation == current.getGeneration()) { // if we have a new one it's already synced
ensureOpen();
return current.syncUpTo(location.translogLocation + location.size);
}
@ -604,151 +564,67 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return config;
}
private final class OnCloseRunnable implements Callback<ChannelReference> {
@Override
public void handle(ChannelReference channelReference) {
if (isReferencedGeneration(channelReference.getGeneration()) == false) {
Path translogPath = channelReference.getPath();
assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
// if the given translogPath is not the current we can safely delete the file since all references are released
logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
IOUtils.deleteFilesIgnoringExceptions(translogPath);
IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
for (Path path : stream) {
Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
if (matcher.matches()) {
long generation = Long.parseLong(matcher.group(1));
if (isReferencedGeneration(generation) == false) {
logger.trace("delete translog file - not referenced and not current anymore {}", path);
IOUtils.deleteFilesIgnoringExceptions(path);
IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
}
}
}
} catch (IOException e) {
logger.warn("failed to delete unreferenced translog files", e);
}
}
}
/**
* a view into the translog, capturing all translog files at the moment of creation
* and updated with any future translogs.
*/
public static final class View implements Closeable {
public static final Translog.View EMPTY_VIEW = new View(Collections.emptyList(), null);
/**
* a view into the translog, capturing all translog files at the moment of creation
* and updated with any future translogs.
*/
public class View implements Closeable {
boolean closed;
// last in this list is always FsTranslog.current
final List<TranslogReader> orderedTranslogs;
private final Callback<View> onClose;
AtomicBoolean closed = new AtomicBoolean();
final long minGeneration;
View(List<TranslogReader> orderedTranslogs, Callback<View> onClose) {
// clone so we can safely mutate..
this.orderedTranslogs = new ArrayList<>(orderedTranslogs);
this.onClose = onClose;
}
/**
* Called by the parent class whenever the current translog changes
*
* @param oldCurrent a new read only reader for the old current (should replace the previous reference)
* @param newCurrent a reader into the new current.
*/
synchronized void onNewTranslog(TranslogReader oldCurrent, TranslogReader newCurrent) throws IOException {
// even though the close method removes this view from outstandingViews, there is no synchronisation in place
// between that operation and an ongoing addition of a new translog, already having an iterator.
// As such, this method can be called despite the fact that we are closed. We need to check and ignore.
if (closed) {
// we have to close the new references created for us as we will not hold them
IOUtils.close(oldCurrent, newCurrent);
return;
}
orderedTranslogs.remove(orderedTranslogs.size() - 1).close();
orderedTranslogs.add(oldCurrent);
orderedTranslogs.add(newCurrent);
View(long minGeneration) {
this.minGeneration = minGeneration;
}
/** the smallest translog generation in this view */
public synchronized long minTranslogGeneration() {
ensureOpen();
return orderedTranslogs.get(0).getGeneration();
public long minTranslogGeneration() {
return minGeneration;
}
/**
* The total number of operations in the view.
*/
public synchronized int totalOperations() {
int ops = 0;
for (TranslogReader translog : orderedTranslogs) {
int tops = translog.totalOperations();
if (tops == TranslogReader.UNKNOWN_OP_COUNT) {
return -1;
}
assert tops >= 0;
ops += tops;
}
return ops;
public int totalOperations() {
return Translog.this.totalOperations(minGeneration);
}
/**
* Returns the size in bytes of the files behind the view.
*/
public synchronized long sizeInBytes() {
long size = 0;
for (TranslogReader translog : orderedTranslogs) {
size += translog.sizeInBytes();
}
return size;
public long sizeInBytes() {
return Translog.this.sizeInBytes(minGeneration);
}
/** create a snapshot from this view */
public synchronized Snapshot snapshot() {
public Snapshot snapshot() {
ensureOpen();
return createSnapshot(orderedTranslogs.toArray(new TranslogReader[orderedTranslogs.size()]));
return Translog.this.createSnapshot(minGeneration);
}
void ensureOpen() {
if (closed) {
throw new ElasticsearchException("View is already closed");
if (closed.get()) {
throw new AlreadyClosedException("View is already closed");
}
}
@Override
public void close() {
final List<TranslogReader> toClose = new ArrayList<>();
try {
synchronized (this) {
if (closed == false) {
try {
if (onClose != null) {
onClose.handle(this);
}
} finally {
closed = true;
toClose.addAll(orderedTranslogs);
orderedTranslogs.clear();
}
}
}
} finally {
try {
// Close out of lock to prevent deadlocks between channel close which checks for
// references in InternalChannelReference.closeInternal (waiting on a read lock)
// and other FsTranslog#newTranslog calling FsView.onNewTranslog (while having a write lock)
IOUtils.close(toClose);
} catch (Exception e) {
throw new ElasticsearchException("failed to close view", e);
}
public void close() throws IOException {
if (closed.getAndSet(true) == false) {
logger.trace("closing view starting at translog [{}]", minTranslogGeneration());
boolean removed = outstandingViews.remove(this);
assert removed : "View was never added to outstandingViews but was supposed to be removed";
trimUnreferencedReaders();
closeFilesIfNoPendingViews();
}
}
}
public static class Location implements Accountable, Comparable<Location> {
public final long generation;
@ -817,12 +693,12 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
/**
* A snapshot of the transaction log, allows to iterate over all the transaction log operations.
*/
public interface Snapshot extends Releasable {
public interface Snapshot {
/**
* The total number of operations in the translog.
*/
int estimatedTotalOperations();
int totalOperations();
/**
* Returns the next operation in the snapshot or <code>null</code> if we reached the end.
@ -1320,13 +1196,12 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public void prepareCommit() throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (currentCommittingTranslog != null) {
throw new IllegalStateException("already committing a translog with generation: " + currentCommittingTranslog.getGeneration());
if (currentCommittingGeneration != NOT_SET_GENERATION) {
throw new IllegalStateException("already committing a translog with generation: " + currentCommittingGeneration);
}
final TranslogWriter oldCurrent = current;
oldCurrent.ensureOpen();
oldCurrent.sync();
currentCommittingTranslog = current.immutableReader();
currentCommittingGeneration = current.getGeneration();
TranslogReader currentCommittingTranslog = current.closeIntoReader();
readers.add(currentCommittingTranslog);
Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME);
assert Checkpoint.read(checkpoint).generation == currentCommittingTranslog.getGeneration();
Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(currentCommittingTranslog.getGeneration()));
@ -1335,14 +1210,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
IOUtils.fsync(commitCheckpoint.getParent(), true);
// create a new translog file - this will sync it and update the checkpoint data;
current = createWriter(current.getGeneration() + 1);
// notify all outstanding views of the new translog (no views are created now as
// we hold a write lock).
for (View view : outstandingViews) {
view.onNewTranslog(currentCommittingTranslog.clone(), current.newReaderFromWriter());
}
IOUtils.close(oldCurrent);
logger.trace("current translog set to [{}]", current.getGeneration());
assert oldCurrent.syncNeeded() == false : "old translog writer must not need a sync";
} catch (Throwable t) {
IOUtils.closeWhileHandlingException(this); // tragic event
@ -1352,24 +1220,53 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
@Override
public void commit() throws IOException {
ImmutableTranslogReader toClose = null;
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (currentCommittingTranslog == null) {
if (currentCommittingGeneration == NOT_SET_GENERATION) {
prepareCommit();
}
assert currentCommittingGeneration != NOT_SET_GENERATION;
assert readers.stream().filter(r -> r.getGeneration() == currentCommittingGeneration).findFirst().isPresent()
: "reader list doesn't contain committing generation [" + currentCommittingGeneration + "]";
lastCommittedTranslogFileGeneration = current.getGeneration(); // this is important - otherwise old files will not be cleaned up
if (recoveredTranslogs.isEmpty() == false) {
IOUtils.close(recoveredTranslogs);
recoveredTranslogs.clear();
}
toClose = this.currentCommittingTranslog;
this.currentCommittingTranslog = null;
} finally {
IOUtils.close(toClose);
currentCommittingGeneration = NOT_SET_GENERATION;
trimUnreferencedReaders();
}
}
void trimUnreferencedReaders() {
try (ReleasableLock ignored = writeLock.acquire()) {
if (closed.get()) {
// we're shut down, potentially due to some tragic event - don't delete anything
return;
}
long minReferencedGen = outstandingViews.stream().mapToLong(View::minTranslogGeneration).min().orElse(Long.MAX_VALUE);
minReferencedGen = Math.min(lastCommittedTranslogFileGeneration, minReferencedGen);
final long finalMinReferencedGen = minReferencedGen;
List<TranslogReader> unreferenced = readers.stream().filter(r -> r.getGeneration() < finalMinReferencedGen).collect(Collectors.toList());
for (final TranslogReader unreferencedReader : unreferenced) {
Path translogPath = unreferencedReader.path();
logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
IOUtils.closeWhileHandlingException(unreferencedReader);
IOUtils.deleteFilesIgnoringExceptions(translogPath,
translogPath.resolveSibling(getCommitCheckpointFileName(unreferencedReader.getGeneration())));
}
readers.removeAll(unreferenced);
}
}
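For intuition, a small worked sketch of the trim computation with hypothetical numbers:

// hypothetical numbers: two open views starting at generations 3 and 5,
// and lastCommittedTranslogFileGeneration == 4
long minViewGen = Math.min(3, 5);                   // 3, the min over outstandingViews
long minReferencedGen = Math.min(4, minViewGen);    // 3
// readers with generation < 3 (generations 1 and 2) are closed and their
// translog and checkpoint files deleted; generations 3 and 4 stay on disk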
void closeFilesIfNoPendingViews() throws IOException {
try (ReleasableLock ignored = writeLock.acquire()) {
if (closed.get() && outstandingViews.isEmpty()) {
logger.trace("closing files. translog is closed and there are no pending views");
ArrayList<Closeable> toClose = new ArrayList<>(readers);
toClose.add(current);
IOUtils.close(toClose);
}
}
}
@Override
public void rollback() throws IOException {
ensureOpen();
@ -1435,9 +1332,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return TranslogWriter.ChannelFactory.DEFAULT;
}
/** If this {@code Translog} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null. */
/**
* If this {@code Translog} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null.
*/
public Throwable getTragicException() {
return current.getTragicException();
}
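A hedged usage sketch (the `add` call is illustrative; any write path applies): callers hitting an AlreadyClosedException can check whether the close was tragic:

try {
    translog.add(operation); // illustrative write; assumes an open translog
} catch (AlreadyClosedException e) {
    Throwable tragedy = translog.getTragicException();
    if (tragedy != null) {
        // closed by a tragic event, e.g. disk full - surface the root cause
    }
}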

View File

@ -27,161 +27,46 @@ import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.InputStreamDataInput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* A base class for all classes that allows reading ops from translog files
* an immutable translog file reader
*/
public abstract class TranslogReader implements Closeable, Comparable<TranslogReader> {
public static final int UNKNOWN_OP_COUNT = -1;
public class TranslogReader extends BaseTranslogReader implements Closeable {
private static final byte LUCENE_CODEC_HEADER_BYTE = 0x3f;
private static final byte UNVERSIONED_TRANSLOG_HEADER_BYTE = 0x00;
protected final long generation;
protected final ChannelReference channelReference;
protected final FileChannel channel;
private final int totalOperations;
protected final long length;
protected final AtomicBoolean closed = new AtomicBoolean(false);
protected final long firstOperationOffset;
public TranslogReader(long generation, ChannelReference channelReference, long firstOperationOffset) {
this.generation = generation;
this.channelReference = channelReference;
this.channel = channelReference.getChannel();
this.firstOperationOffset = firstOperationOffset;
}
public long getGeneration() {
return this.generation;
}
public abstract long sizeInBytes();
abstract public int totalOperations();
public final long getFirstOperationOffset() {
return firstOperationOffset;
}
public Translog.Operation read(Translog.Location location) throws IOException {
assert location.generation == generation : "read location's translog generation [" + location.generation + "] is not [" + generation + "]";
ByteBuffer buffer = ByteBuffer.allocate(location.size);
try (BufferedChecksumStreamInput checksumStreamInput = checksummedStream(buffer, location.translogLocation, location.size, null)) {
return read(checksumStreamInput);
}
}
/** read the size of the op (i.e., number of bytes, including the op size) written at the given position */
private final int readSize(ByteBuffer reusableBuffer, long position) {
// read op size from disk
assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + reusableBuffer.capacity() + "]";
try {
reusableBuffer.clear();
reusableBuffer.limit(4);
readBytes(reusableBuffer, position);
reusableBuffer.flip();
// Add an extra 4 to account for the operation size integer itself
final int size = reusableBuffer.getInt() + 4;
final long maxSize = sizeInBytes() - position;
if (size < 0 || size > maxSize) {
throw new TranslogCorruptedException("operation size is corrupted must be [0.." + maxSize + "] but was: " + size);
}
return size;
} catch (IOException e) {
throw new ElasticsearchException("unexpected exception reading from translog snapshot of " + this.channelReference.getPath(), e);
}
}
public Translog.Snapshot newSnapshot() {
final ByteBuffer reusableBuffer = ByteBuffer.allocate(1024);
final int totalOperations = totalOperations();
channelReference.incRef();
return newReaderSnapshot(totalOperations, reusableBuffer);
/**
* Create a reader over a translog file channel. The length parameter should be consistent with totalOperations and point
* at the end of the last operation in this reader.
*/
public TranslogReader(long generation, FileChannel channel, Path path, long firstOperationOffset, long length, int totalOperations) {
super(generation, channel, path, firstOperationOffset);
this.length = length;
this.totalOperations = totalOperations;
}
/**
* reads an operation at the given position and returns it. The buffer length is equal to the number
* of bytes read.
* Given a file, opens a {@link TranslogReader}, taking care of checking and validating the file header.
*/
private final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize, BufferedChecksumStreamInput reuse) throws IOException {
final ByteBuffer buffer;
if (reusableBuffer.capacity() >= opSize) {
buffer = reusableBuffer;
} else {
buffer = ByteBuffer.allocate(opSize);
}
buffer.clear();
buffer.limit(opSize);
readBytes(buffer, position);
buffer.flip();
return new BufferedChecksumStreamInput(new ByteBufferStreamInput(buffer), reuse);
}
protected Translog.Operation read(BufferedChecksumStreamInput inStream) throws IOException {
return Translog.readOperation(inStream);
}
/**
* reads bytes at position into the given buffer, filling it.
*/
abstract protected void readBytes(ByteBuffer buffer, long position) throws IOException;
@Override
public final void close() throws IOException {
if (closed.compareAndSet(false, true)) {
channelReference.decRef();
}
}
protected final boolean isClosed() {
return closed.get();
}
protected void ensureOpen() {
if (isClosed()) {
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed");
}
}
@Override
public String toString() {
return "translog [" + generation + "][" + channelReference.getPath() + "]";
}
@Override
public int compareTo(TranslogReader o) {
return Long.compare(getGeneration(), o.getGeneration());
}
/**
* Given a file, return a VersionedTranslogStream based on an
* optionally-existing header in the file. If the file does not exist, or
* has zero length, returns the latest version. If the header does not
* exist, assumes Version 0 of the translog file format.
*/
public static ImmutableTranslogReader open(ChannelReference channelReference, Checkpoint checkpoint, String translogUUID) throws IOException {
final FileChannel channel = channelReference.getChannel();
final Path path = channelReference.getPath();
assert channelReference.getGeneration() == checkpoint.generation : "expected generation: " + channelReference.getGeneration() + " but got: " + checkpoint.generation;
public static TranslogReader open(FileChannel channel, Path path, Checkpoint checkpoint, String translogUUID) throws IOException {
try {
if (checkpoint.offset == 0 && checkpoint.numOps == TranslogReader.UNKNOWN_OP_COUNT) { // only old files can be empty
return new LegacyTranslogReader(channelReference.getGeneration(), channelReference, 0);
}
InputStreamStreamInput headerStream = new InputStreamStreamInput(Channels.newInputStream(channel)); // don't close
InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel)); // don't close
// Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the
// header, in binary this looks like:
//
@ -208,20 +93,17 @@ public abstract class TranslogReader implements Closeable, Comparable<TranslogRe
// ourselves here, because it allows us to read the first
// byte separately
if (header != CodecUtil.CODEC_MAGIC) {
throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header");
throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path);
}
// Confirm the rest of the header using CodecUtil, extracting
// the translog version
int version = CodecUtil.checkHeaderNoMagic(new InputStreamDataInput(headerStream), TranslogWriter.TRANSLOG_CODEC, 1, Integer.MAX_VALUE);
switch (version) {
case TranslogWriter.VERSION_CHECKSUMS:
assert checkpoint.numOps == TranslogReader.UNKNOWN_OP_COUNT : "expected unknown op count but got: " + checkpoint.numOps;
assert checkpoint.offset == Files.size(path) : "offset(" + checkpoint.offset + ") != file_size(" + Files.size(path) + ") for: " + path;
// legacy - we still have to support it somehow
return new LegacyTranslogReaderBase(channelReference.getGeneration(), channelReference, CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC), checkpoint.offset);
throw new IllegalStateException("pre-2.0 translog found [" + path + "]");
case TranslogWriter.VERSION_CHECKPOINTS:
assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX) : "new file ends with old suffix: " + path;
assert checkpoint.numOps > TranslogReader.UNKNOWN_OP_COUNT: "expected at least 0 operations but got: " + checkpoint.numOps;
assert checkpoint.numOps >= 0 : "expected at least 0 operations but got: " + checkpoint.numOps;
assert checkpoint.offset <= channel.size() : "checkpoint is inconsistent with channel length: " + channel.size() + " " + checkpoint;
int len = headerStream.readInt();
if (len > channel.size()) {
@ -232,78 +114,61 @@ public abstract class TranslogReader implements Closeable, Comparable<TranslogRe
headerStream.read(ref.bytes, ref.offset, ref.length);
BytesRef uuidBytes = new BytesRef(translogUUID);
if (uuidBytes.bytesEquals(ref) == false) {
throw new TranslogCorruptedException("expected shard UUID [" + uuidBytes + "] but got: [" + ref + "] this translog file belongs to a different translog");
throw new TranslogCorruptedException("expected shard UUID [" + uuidBytes + "] but got: [" + ref + "] this translog file belongs to a different translog. path:" + path);
}
return new ImmutableTranslogReader(channelReference.getGeneration(), channelReference, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + RamUsageEstimator.NUM_BYTES_INT, checkpoint.offset, checkpoint.numOps);
return new TranslogReader(checkpoint.generation, channel, path, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + RamUsageEstimator.NUM_BYTES_INT, checkpoint.offset, checkpoint.numOps);
default:
throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path);
}
} else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) {
assert checkpoint.numOps == TranslogReader.UNKNOWN_OP_COUNT : "expected unknown op count but got: " + checkpoint.numOps;
assert checkpoint.offset == Files.size(path) : "offset(" + checkpoint.offset + ") != file_size(" + Files.size(path) + ") for: " + path;
return new LegacyTranslogReader(channelReference.getGeneration(), channelReference, checkpoint.offset);
throw new IllegalStateException("pre-1.4 translog found [" + path + "]");
} else {
throw new TranslogCorruptedException("Invalid first byte in translog file, got: " + Long.toHexString(b1) + ", expected 0x00 or 0x3f");
throw new TranslogCorruptedException("Invalid first byte in translog file, got: " + Long.toHexString(b1) + ", expected 0x00 or 0x3f. path:" + path);
}
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
throw new TranslogCorruptedException("Translog header corrupted", e);
throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e);
}
}
public Path path() {
return channelReference.getPath();
public long sizeInBytes() {
return length;
}
protected Translog.Snapshot newReaderSnapshot(int totalOperations, ByteBuffer reusableBuffer) {
return new ReaderSnapshot(totalOperations, reusableBuffer);
public int totalOperations() {
return totalOperations;
}
class ReaderSnapshot implements Translog.Snapshot {
private final AtomicBoolean closed;
private final int totalOperations;
private final ByteBuffer reusableBuffer;
long position;
int readOperations;
private BufferedChecksumStreamInput reuse;
public ReaderSnapshot(int totalOperations, ByteBuffer reusableBuffer) {
this.totalOperations = totalOperations;
this.reusableBuffer = reusableBuffer;
closed = new AtomicBoolean(false);
position = firstOperationOffset;
readOperations = 0;
reuse = null;
/**
* reads an operation at the given position into the given buffer.
*/
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
if (position >= length) {
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]");
}
@Override
public final int estimatedTotalOperations() {
return totalOperations;
if (position < firstOperationOffset) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + firstOperationOffset + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}
@Override
public Translog.Operation next() throws IOException {
if (readOperations < totalOperations) {
assert readOperations < totalOperations : "readOperations must be less than totalOperations";
return readOperation();
} else {
return null;
}
public Checkpoint getInfo() {
return new Checkpoint(length, totalOperations, getGeneration());
}
@Override
public final void close() throws IOException {
if (closed.compareAndSet(false, true)) {
channel.close();
}
}
protected final Translog.Operation readOperation() throws IOException {
final int opSize = readSize(reusableBuffer, position);
reuse = checksummedStream(reusableBuffer, position, opSize, reuse);
Translog.Operation op = read(reuse);
position += opSize;
readOperations++;
return op;
}
protected final boolean isClosed() {
return closed.get();
}
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
channelReference.decRef();
}
protected void ensureOpen() {
if (isClosed()) {
throw new AlreadyClosedException(toString() + " is already closed");
}
}
}

View File

@ -16,7 +16,6 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.elasticsearch.common.io.Channels;
@ -24,68 +23,82 @@ import org.elasticsearch.common.io.Channels;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
/**
* a translog reader which is fixed in length
*/
public class ImmutableTranslogReader extends TranslogReader {
public class TranslogSnapshot extends BaseTranslogReader implements Translog.Snapshot {
private final int totalOperations;
protected final long length;
private final ByteBuffer reusableBuffer;
private long position;
private int readOperations;
private BufferedChecksumStreamInput reuse;
/**
* Create a snapshot of a translog file channel. The length parameter should be consistent with totalOperations and point
* at the end of the last operation in this snapshot.
*/
public ImmutableTranslogReader(long generation, ChannelReference channelReference, long firstOperationOffset, long length, int totalOperations) {
super(generation, channelReference, firstOperationOffset);
public TranslogSnapshot(long generation, FileChannel channel, Path path, long firstOperationOffset, long length, int totalOperations) {
super(generation, channel, path, firstOperationOffset);
this.length = length;
this.totalOperations = totalOperations;
this.reusableBuffer = ByteBuffer.allocate(1024);
readOperations = 0;
position = firstOperationOffset;
reuse = null;
}
@Override
public final TranslogReader clone() {
if (channelReference.tryIncRef()) {
try {
ImmutableTranslogReader reader = newReader(generation, channelReference, firstOperationOffset, length, totalOperations);
channelReference.incRef(); // for the new object
return reader;
} finally {
channelReference.decRef();
}
public final int totalOperations() {
return totalOperations;
}
@Override
public Translog.Operation next() throws IOException {
if (readOperations < totalOperations) {
return readOperation();
} else {
throw new IllegalStateException("can't increment translog [" + generation + "] channel ref count");
return null;
}
}
protected ImmutableTranslogReader newReader(long generation, ChannelReference channelReference, long offset, long length, int totalOperations) {
return new ImmutableTranslogReader(generation, channelReference, offset, length, totalOperations);
protected final Translog.Operation readOperation() throws IOException {
final int opSize = readSize(reusableBuffer, position);
reuse = checksummedStream(reusableBuffer, position, opSize, reuse);
Translog.Operation op = read(reuse);
position += opSize;
readOperations++;
return op;
}
public long sizeInBytes() {
return length;
}
public int totalOperations() {
return totalOperations;
}
/**
* reads an operation at the given position into the given buffer.
*/
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
if (position >= length) {
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]");
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" + getGeneration() + "], path: [" + path + "]");
}
if (position < firstOperationOffset) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + firstOperationOffset + "]");
if (position < getFirstOperationOffset()) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}
public Checkpoint getInfo() {
return new Checkpoint(length, totalOperations, getGeneration());
@Override
public String toString() {
return "TranslogSnapshot{" +
"readOperations=" + readOperations +
", position=" + position +
", totalOperations=" + totalOperations +
", length=" + length +
", reusableBuffer=" + reusableBuffer +
'}';
}
}
}

View File

@ -25,24 +25,23 @@ import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.index.shard.ShardId;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.atomic.AtomicBoolean;
public class TranslogWriter extends TranslogReader {
public class TranslogWriter extends BaseTranslogReader implements Closeable {
public static final String TRANSLOG_CODEC = "translog";
public static final int VERSION_CHECKSUMS = 1;
@ -61,11 +60,14 @@ public class TranslogWriter extends TranslogReader {
/* the total offset of this file including the bytes written to the file as well as into the buffer */
private volatile long totalOffset;
public TranslogWriter(ShardId shardId, long generation, ChannelReference channelReference, ByteSizeValue bufferSize) throws IOException {
super(generation, channelReference, channelReference.getChannel().position());
protected final AtomicBoolean closed = new AtomicBoolean(false);
public TranslogWriter(ShardId shardId, long generation, FileChannel channel, Path path, ByteSizeValue bufferSize) throws IOException {
super(generation, channel, path, channel.position());
this.shardId = shardId;
this.outputStream = new BufferedChannelOutputStream(java.nio.channels.Channels.newOutputStream(channelReference.getChannel()), bufferSize.bytesAsInt());
this.lastSyncedOffset = channelReference.getChannel().position();
this.outputStream = new BufferedChannelOutputStream(java.nio.channels.Channels.newOutputStream(channel), bufferSize.bytesAsInt());
this.lastSyncedOffset = channel.position();
totalOffset = lastSyncedOffset;
}
@ -74,10 +76,10 @@ public class TranslogWriter extends TranslogReader {
}
private static int getHeaderLength(int uuidLength) {
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT;
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT;
}
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException {
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException {
final BytesRef ref = new BytesRef(translogUUID);
final int headerLength = getHeaderLength(ref.length);
final FileChannel channel = channelFactory.open(file);
@ -90,7 +92,7 @@ public class TranslogWriter extends TranslogReader {
out.writeBytes(ref.bytes, ref.offset, ref.length);
channel.force(true);
writeCheckpoint(headerLength, 0, file.getParent(), fileGeneration, StandardOpenOption.WRITE);
final TranslogWriter writer = new TranslogWriter(shardId, fileGeneration, new ChannelReference(file, fileGeneration, channel, onClose), bufferSize);
final TranslogWriter writer = new TranslogWriter(shardId, fileGeneration, channel, file, bufferSize);
return writer;
} catch (Throwable throwable) {
// if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
@ -99,9 +101,12 @@ public class TranslogWriter extends TranslogReader {
throw throwable;
}
}
/** If this {@code TranslogWriter} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null. */
/**
* If this {@code TranslogWriter} was closed as a side-effect of a tragic exception,
* e.g. disk full while flushing a new segment, this returns the root cause exception.
* Otherwise (no tragic exception has occurred) it returns null.
*/
public Throwable getTragicException() {
return tragedy;
}
@ -110,7 +115,9 @@ public class TranslogWriter extends TranslogReader {
assert throwable != null : "throwable must not be null in a tragic event";
if (tragedy == null) {
tragedy = throwable;
} else {
} else if (tragedy != throwable) {
// it should be safe to call closeWithTragicEvents on multiple layers without
// worrying about self suppression.
tragedy.addSuppressed(throwable);
}
close();
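The `tragedy != throwable` guard matters because the JDK rejects self-suppression; a minimal, self-contained sketch of what it prevents:

import java.io.IOException;

class SelfSuppressionSketch {
    public static void main(String[] args) {
        IOException failure = new IOException("disk full");
        try {
            failure.addSuppressed(failure); // same instance passed to itself
        } catch (IllegalArgumentException expected) {
            // "Self-suppression not permitted" - the guard above avoids this path
        }
    }
}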
@ -134,29 +141,27 @@ public class TranslogWriter extends TranslogReader {
}
/**
* write all buffered ops to disk and fsync file
* write all buffered ops to disk and fsync file.
*
* Note: any exception during the sync process will be interpreted as a tragic exception and the writer will be closed before
* raising the exception.
*/
public void sync() throws IOException {
if (syncNeeded()) {
synchronized (this) {
ensureOpen(); // this call gives a better exception that the incRef if we are closed by a tragic event
channelReference.incRef();
ensureOpen();
final long offsetToSync;
final int opsCounter;
try {
final long offsetToSync;
final int opsCounter;
outputStream.flush();
offsetToSync = totalOffset;
opsCounter = operationCounter;
try {
checkpoint(offsetToSync, opsCounter, channelReference);
} catch (Throwable ex) {
closeWithTragicEvent(ex);
throw ex;
}
lastSyncedOffset = offsetToSync;
} finally {
channelReference.decRef();
checkpoint(offsetToSync, opsCounter, generation, channel, path);
} catch (Throwable ex) {
closeWithTragicEvent(ex);
throw ex;
}
lastSyncedOffset = offsetToSync;
}
}
}
@ -177,76 +182,36 @@ public class TranslogWriter extends TranslogReader {
}
/**
* returns a new reader that follows the current writes (most importantly allows making
* repeated snapshots that include new content)
* closes this writer and transfers its underlying file channel to a new immutable reader
*/
public TranslogReader newReaderFromWriter() {
ensureOpen();
channelReference.incRef();
boolean success = false;
public synchronized TranslogReader closeIntoReader() throws IOException {
try {
final TranslogReader reader = new InnerReader(this.generation, firstOperationOffset, channelReference);
success = true;
return reader;
} finally {
if (!success) {
channelReference.decRef();
}
sync(); // sync before we close
} catch (IOException e) {
closeWithTragicEvent(e);
throw e;
}
}
/**
* returns a new immutable reader which only exposes the current written operation *
*/
public ImmutableTranslogReader immutableReader() throws TranslogException {
if (channelReference.tryIncRef()) {
synchronized (this) {
try {
ensureOpen();
outputStream.flush();
ImmutableTranslogReader reader = new ImmutableTranslogReader(this.generation, channelReference, firstOperationOffset, getWrittenOffset(), operationCounter);
channelReference.incRef(); // for new reader
return reader;
} catch (Exception e) {
throw new TranslogException(shardId, "exception while creating an immutable reader", e);
} finally {
channelReference.decRef();
}
}
if (closed.compareAndSet(false, true)) {
return new TranslogReader(generation, channel, path, firstOperationOffset, getWrittenOffset(), operationCounter);
} else {
throw new TranslogException(shardId, "can't increment channel [" + channelReference + "] ref count");
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy);
}
}
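A hedged sketch of the ownership transfer (the `writer` instance and the `operationsWritten` counter are hypothetical; after the call only the reader may be used):

// a minimal sketch: closeIntoReader() syncs, marks the writer closed and hands
// its file channel to an immutable reader covering everything written so far
TranslogReader reader = writer.closeIntoReader();
assert reader.totalOperations() == operationsWritten; // hypothetical counter
// a second closeIntoReader() or write on `writer` would now fail with
// AlreadyClosedException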
@Override
public synchronized Translog.Snapshot newSnapshot() {
ensureOpen();
try {
sync();
} catch (IOException e) {
throw new TranslogException(shardId, "exception while syncing before creating a snapshot", e);
}
return super.newSnapshot();
}
private long getWrittenOffset() throws IOException {
return channelReference.getChannel().position();
}
/**
* this class is used when one wants a reference to this file which exposes all recently written operations.
* As such it needs access to the internals of the current reader
*/
final class InnerReader extends TranslogReader {
public InnerReader(long generation, long fistOperationOffset, ChannelReference channelReference) {
super(generation, channelReference, fistOperationOffset);
}
@Override
public long sizeInBytes() {
return TranslogWriter.this.sizeInBytes();
}
@Override
public int totalOperations() {
return TranslogWriter.this.totalOperations();
}
@Override
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
TranslogWriter.this.readBytes(buffer, position);
}
return channel.position();
}
/**
@ -264,13 +229,13 @@ public class TranslogWriter extends TranslogReader {
@Override
protected void readBytes(ByteBuffer targetBuffer, long position) throws IOException {
if (position+targetBuffer.remaining() > getWrittenOffset()) {
if (position + targetBuffer.remaining() > getWrittenOffset()) {
synchronized (this) {
// we only flush here if it's really needed - try to minimize the impact of the read operation.
// in some cases, i.e. a tragic event, we might still be able to read the relevant value,
// which is not really important in production, but some tests can make stricter assumptions
// if we don't fail in this call unless absolutely necessary.
if (position+targetBuffer.remaining() > getWrittenOffset()) {
if (position + targetBuffer.remaining() > getWrittenOffset()) {
outputStream.flush();
}
}
@ -280,9 +245,9 @@ public class TranslogWriter extends TranslogReader {
Channels.readFromFileChannelWithEofException(channel, position, targetBuffer);
}
private synchronized void checkpoint(long lastSyncPosition, int operationCounter, ChannelReference channelReference) throws IOException {
channelReference.getChannel().force(false);
writeCheckpoint(lastSyncPosition, operationCounter, channelReference.getPath().getParent(), channelReference.getGeneration(), StandardOpenOption.WRITE);
private synchronized void checkpoint(long lastSyncPosition, int operationCounter, long generation, FileChannel translogFileChannel, Path translogFilePath) throws IOException {
translogFileChannel.force(false);
writeCheckpoint(lastSyncPosition, operationCounter, translogFilePath.getParent(), generation, StandardOpenOption.WRITE);
}
private static void writeCheckpoint(long syncPosition, int numOperations, Path translogFile, long generation, OpenOption... options) throws IOException {
@ -307,6 +272,17 @@ public class TranslogWriter extends TranslogReader {
}
}
@Override
public final void close() throws IOException {
if (closed.compareAndSet(false, true)) {
channel.close();
}
}
protected final boolean isClosed() {
return closed.get();
}
private final class BufferedChannelOutputStream extends BufferedOutputStream {

View File

@ -22,9 +22,7 @@ package org.elasticsearch.indices;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
@ -57,52 +55,6 @@ import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.query.BoolQueryParser;
import org.elasticsearch.index.query.BoostingQueryParser;
import org.elasticsearch.index.query.CommonTermsQueryParser;
import org.elasticsearch.index.query.ConstantScoreQueryParser;
import org.elasticsearch.index.query.DisMaxQueryParser;
import org.elasticsearch.index.query.ExistsQueryParser;
import org.elasticsearch.index.query.FieldMaskingSpanQueryParser;
import org.elasticsearch.index.query.FuzzyQueryParser;
import org.elasticsearch.index.query.GeoBoundingBoxQueryParser;
import org.elasticsearch.index.query.GeoDistanceQueryParser;
import org.elasticsearch.index.query.GeoDistanceRangeQueryParser;
import org.elasticsearch.index.query.GeoPolygonQueryParser;
import org.elasticsearch.index.query.GeoShapeQueryParser;
import org.elasticsearch.index.query.GeohashCellQuery;
import org.elasticsearch.index.query.HasChildQueryParser;
import org.elasticsearch.index.query.HasParentQueryParser;
import org.elasticsearch.index.query.IdsQueryParser;
import org.elasticsearch.index.query.IndicesQueryParser;
import org.elasticsearch.index.query.MatchAllQueryParser;
import org.elasticsearch.index.query.MatchNoneQueryParser;
import org.elasticsearch.index.query.MatchQueryParser;
import org.elasticsearch.index.query.MoreLikeThisQueryParser;
import org.elasticsearch.index.query.MultiMatchQueryParser;
import org.elasticsearch.index.query.NestedQueryParser;
import org.elasticsearch.index.query.PrefixQueryParser;
import org.elasticsearch.index.query.QueryParser;
import org.elasticsearch.index.query.QueryStringQueryParser;
import org.elasticsearch.index.query.RangeQueryParser;
import org.elasticsearch.index.query.RegexpQueryParser;
import org.elasticsearch.index.query.ScriptQueryParser;
import org.elasticsearch.index.query.SimpleQueryStringParser;
import org.elasticsearch.index.query.SpanContainingQueryParser;
import org.elasticsearch.index.query.SpanFirstQueryParser;
import org.elasticsearch.index.query.SpanMultiTermQueryParser;
import org.elasticsearch.index.query.SpanNearQueryParser;
import org.elasticsearch.index.query.SpanNotQueryParser;
import org.elasticsearch.index.query.SpanOrQueryParser;
import org.elasticsearch.index.query.SpanTermQueryParser;
import org.elasticsearch.index.query.SpanWithinQueryParser;
import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.index.query.TermQueryParser;
import org.elasticsearch.index.query.TermsQueryParser;
import org.elasticsearch.index.query.TypeQueryParser;
import org.elasticsearch.index.query.WildcardQueryParser;
import org.elasticsearch.index.query.WrapperQueryParser;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
@ -111,7 +63,6 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.recovery.RecoverySource;
import org.elasticsearch.indices.recovery.RecoveryTarget;
@ -127,9 +78,6 @@ import java.util.Map;
*/
public class IndicesModule extends AbstractModule {
private final ExtensionPoint.ClassSet<QueryParser> queryParsers
= new ExtensionPoint.ClassSet<>("query_parser", QueryParser.class);
private final Map<String, Mapper.TypeParser> mapperParsers
= new LinkedHashMap<>();
// Use a LinkedHashMap for metadataMappers because iteration order matters
@ -137,62 +85,10 @@ public class IndicesModule extends AbstractModule {
= new LinkedHashMap<>();
public IndicesModule() {
registerBuiltinQueryParsers();
registerBuiltInMappers();
registerBuiltInMetadataMappers();
}
private void registerBuiltinQueryParsers() {
registerQueryParser(MatchQueryParser.class);
registerQueryParser(MultiMatchQueryParser.class);
registerQueryParser(NestedQueryParser.class);
registerQueryParser(HasChildQueryParser.class);
registerQueryParser(HasParentQueryParser.class);
registerQueryParser(DisMaxQueryParser.class);
registerQueryParser(IdsQueryParser.class);
registerQueryParser(MatchAllQueryParser.class);
registerQueryParser(QueryStringQueryParser.class);
registerQueryParser(BoostingQueryParser.class);
registerQueryParser(BoolQueryParser.class);
registerQueryParser(TermQueryParser.class);
registerQueryParser(TermsQueryParser.class);
registerQueryParser(FuzzyQueryParser.class);
registerQueryParser(RegexpQueryParser.class);
registerQueryParser(RangeQueryParser.class);
registerQueryParser(PrefixQueryParser.class);
registerQueryParser(WildcardQueryParser.class);
registerQueryParser(ConstantScoreQueryParser.class);
registerQueryParser(SpanTermQueryParser.class);
registerQueryParser(SpanNotQueryParser.class);
registerQueryParser(SpanWithinQueryParser.class);
registerQueryParser(SpanContainingQueryParser.class);
registerQueryParser(FieldMaskingSpanQueryParser.class);
registerQueryParser(SpanFirstQueryParser.class);
registerQueryParser(SpanNearQueryParser.class);
registerQueryParser(SpanOrQueryParser.class);
registerQueryParser(MoreLikeThisQueryParser.class);
registerQueryParser(WrapperQueryParser.class);
registerQueryParser(IndicesQueryParser.class);
registerQueryParser(CommonTermsQueryParser.class);
registerQueryParser(SpanMultiTermQueryParser.class);
registerQueryParser(FunctionScoreQueryParser.class);
registerQueryParser(SimpleQueryStringParser.class);
registerQueryParser(TemplateQueryParser.class);
registerQueryParser(TypeQueryParser.class);
registerQueryParser(ScriptQueryParser.class);
registerQueryParser(GeoDistanceQueryParser.class);
registerQueryParser(GeoDistanceRangeQueryParser.class);
registerQueryParser(GeoBoundingBoxQueryParser.class);
registerQueryParser(GeohashCellQuery.Parser.class);
registerQueryParser(GeoPolygonQueryParser.class);
registerQueryParser(ExistsQueryParser.class);
registerQueryParser(MatchNoneQueryParser.class);
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerQueryParser(GeoShapeQueryParser.class);
}
}
private void registerBuiltInMappers() {
registerMapper(ByteFieldMapper.CONTENT_TYPE, new ByteFieldMapper.TypeParser());
registerMapper(ShortFieldMapper.CONTENT_TYPE, new ShortFieldMapper.TypeParser());
@ -236,10 +132,6 @@ public class IndicesModule extends AbstractModule {
// last so that it can see all other mappers, including those coming from plugins
}
public void registerQueryParser(Class<? extends QueryParser> queryParser) {
queryParsers.registerExtension(queryParser);
}
/**
* Register a mapper for the given type.
*/
@ -262,7 +154,6 @@ public class IndicesModule extends AbstractModule {
@Override
protected void configure() {
bindQueryParsersExtension();
bindMapperExtension();
bind(IndicesService.class).asEagerSingleton();
@ -283,7 +174,6 @@ public class IndicesModule extends AbstractModule {
bind(IndicesFieldDataCacheListener.class).asEagerSingleton();
bind(TermVectorsService.class).asEagerSingleton();
bind(NodeServicesProvider.class).asEagerSingleton();
bind(ShapeBuilderRegistry.class).asEagerSingleton();
}
// public for testing
@ -302,9 +192,4 @@ public class IndicesModule extends AbstractModule {
protected void bindMapperExtension() {
bind(MapperRegistry.class).toInstance(getMapperRegistry());
}
protected void bindQueryParsersExtension() {
queryParsers.bind(binder());
bind(IndicesQueriesRegistry.class).asEagerSingleton();
}
}

View File

@ -412,8 +412,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} else {
logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, op, mappingType);
}
// we don't apply default, since it has been applied when the mappings were parsed initially
mapperService.merge(mappingType, mappingSource, false, true);
mapperService.merge(mappingType, mappingSource, MapperService.MergeReason.MAPPING_RECOVERY, true);
if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
requiresRefresh = true;

View File

@ -19,39 +19,18 @@
package org.elasticsearch.indices.query;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.EmptyQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParser;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryParser;
public class IndicesQueriesRegistry extends AbstractComponent {
private Map<String, QueryParser<?>> queryParsers;
@Inject
public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) {
public IndicesQueriesRegistry(Settings settings, Map<String, QueryParser<?>> queryParsers) {
super(settings);
Map<String, QueryParser<?>> queryParsers = new HashMap<>();
for (@SuppressWarnings("unchecked") QueryParser<? extends QueryBuilder> queryParser : injectedQueryParsers) {
for (String name : queryParser.names()) {
queryParsers.put(name, queryParser);
}
@SuppressWarnings("unchecked") NamedWriteable<? extends QueryBuilder> qb = queryParser.getBuilderPrototype();
namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb);
}
// EmptyQueryBuilder is not registered as query parser but used internally.
// We need to register it with the NamedWriteableRegistry in order to serialize it
namedWriteableRegistry.registerPrototype(QueryBuilder.class, EmptyQueryBuilder.PROTOTYPE);
this.queryParsers = unmodifiableMap(queryParsers);
this.queryParsers = queryParsers;
}
/**

View File

@ -37,6 +37,7 @@ import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@ -82,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
}
private RecoveryResponse recover(final StartRecoveryRequest request) {
private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException {
final IndexService indexService = indicesService.indexServiceSafe(request.shardId().index().name());
final IndexShard shard = indexService.getShard(request.shardId().id());

View File

@ -120,7 +120,7 @@ public class RecoverySourceHandler {
/**
* performs the recovery from the local engine to the target
*/
public RecoveryResponse recoverToTarget() {
public RecoveryResponse recoverToTarget() throws IOException {
try (Translog.View translogView = shard.acquireTranslogView()) {
logger.trace("captured translog id [{}] for recovery", translogView.minTranslogGeneration());
final IndexCommit phase1Snapshot;
@ -144,8 +144,8 @@ public class RecoverySourceHandler {
}
logger.trace("snapshot translog for recovery. current size is [{}]", translogView.totalOperations());
try (Translog.Snapshot phase2Snapshot = translogView.snapshot()) {
phase2(phase2Snapshot);
try {
phase2(translogView.snapshot());
} catch (Throwable e) {
throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e);
}
@ -308,7 +308,7 @@ public class RecoverySourceHandler {
});
}
prepareTargetForTranslog(translogView);
prepareTargetForTranslog(translogView.totalOperations());
logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
response.phase1Time = stopWatch.totalTime().millis();
@ -320,8 +320,7 @@ public class RecoverySourceHandler {
}
protected void prepareTargetForTranslog(final Translog.View translogView) {
protected void prepareTargetForTranslog(final int totalTranslogOps) {
StopWatch stopWatch = new StopWatch().start();
logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog", request.shardId(), request.targetNode());
final long startEngineStart = stopWatch.totalTime().millis();
@ -332,7 +331,7 @@ public class RecoverySourceHandler {
// operations. This ensures the shard engine is started and disables
// garbage collection (not the JVM's GC!) of tombstone deletes
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), translogView.totalOperations()),
new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), totalTranslogOps),
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
}
});
@ -463,14 +462,14 @@ public class RecoverySourceHandler {
// make sense to re-enable throttling in this phase
cancellableThreads.execute(() -> {
final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
request.recoveryId(), request.shardId(), operations, snapshot.estimatedTotalOperations());
request.recoveryId(), request.shardId(), operations, snapshot.totalOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest,
recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
if (logger.isTraceEnabled()) {
logger.trace("[{}][{}] sent batch of [{}][{}] (total: [{}]) translog operations to {}",
indexName, shardId, ops, new ByteSizeValue(size),
snapshot.estimatedTotalOperations(),
snapshot.totalOperations(),
request.targetNode());
}
@ -488,7 +487,7 @@ public class RecoverySourceHandler {
if (!operations.isEmpty()) {
cancellableThreads.execute(() -> {
RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
request.recoveryId(), request.shardId(), operations, snapshot.estimatedTotalOperations());
request.recoveryId(), request.shardId(), operations, snapshot.totalOperations());
transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest,
recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
});
@ -497,7 +496,7 @@ public class RecoverySourceHandler {
if (logger.isTraceEnabled()) {
logger.trace("[{}][{}] sent final batch of [{}][{}] (total: [{}]) translog operations to {}",
indexName, shardId, ops, new ByteSizeValue(size),
snapshot.estimatedTotalOperations(),
snapshot.totalOperations(),
request.targetNode());
}
return totalOperations;

View File

@ -58,7 +58,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
shard.failShard("failed to close engine (phase1)", e);
}
}
prepareTargetForTranslog(Translog.View.EMPTY_VIEW);
prepareTargetForTranslog(0);
finalizeRecovery();
return response;
} catch (Throwable t) {

View File

@ -65,6 +65,9 @@ public class OsStats implements Streamable, ToXContent {
static final XContentBuilderString CPU = new XContentBuilderString("cpu");
static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
static final XContentBuilderString LOAD_AVERAGE = new XContentBuilderString("load_average");
static final XContentBuilderString LOAD_AVERAGE_1M = new XContentBuilderString("1m");
static final XContentBuilderString LOAD_AVERAGE_5M = new XContentBuilderString("5m");
static final XContentBuilderString LOAD_AVERAGE_15M = new XContentBuilderString("15m");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString SWAP = new XContentBuilderString("swap");
@ -77,7 +80,6 @@ public class OsStats implements Streamable, ToXContent {
static final XContentBuilderString FREE_PERCENT = new XContentBuilderString("free_percent");
static final XContentBuilderString USED_PERCENT = new XContentBuilderString("used_percent");
}
@Override
@ -88,11 +90,17 @@ public class OsStats implements Streamable, ToXContent {
builder.startObject(Fields.CPU);
builder.field(Fields.PERCENT, cpu.getPercent());
if (cpu.getLoadAverage() != null) {
builder.startArray(Fields.LOAD_AVERAGE);
builder.value(cpu.getLoadAverage()[0]);
builder.value(cpu.getLoadAverage()[1]);
builder.value(cpu.getLoadAverage()[2]);
builder.endArray();
builder.startObject(Fields.LOAD_AVERAGE);
if (cpu.getLoadAverage()[0] != -1) {
builder.field(Fields.LOAD_AVERAGE_1M, cpu.getLoadAverage()[0]);
}
if (cpu.getLoadAverage()[1] != -1) {
builder.field(Fields.LOAD_AVERAGE_5M, cpu.getLoadAverage()[1]);
}
if (cpu.getLoadAverage()[2] != -1) {
builder.field(Fields.LOAD_AVERAGE_15M, cpu.getLoadAverage()[2]);
}
builder.endObject();
}
builder.endObject();
}
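With this change the node stats output renders the load average as an object keyed by averaging window instead of a bare three-element array, omitting any window the platform reports as -1. An illustrative fragment of the resulting JSON (values invented):

    "cpu": {
      "percent": 12,
      "load_average": {
        "1m": 3.07,
        "5m": 2.54,
        "15m": 1.85
      }
    }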

View File

@ -37,6 +37,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
@ -164,6 +165,7 @@ public class Node implements Releasable {
final NetworkService networkService = new NetworkService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
final ThreadPool threadPool = new ThreadPool(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);
@ -178,7 +180,7 @@ public class Node implements Releasable {
modules.add(new SettingsModule(this.settings, settingsFilter));
modules.add(new EnvironmentModule(environment));
modules.add(new NodeModule(this, monitorService));
modules.add(new NetworkModule(networkService, settings, false));
modules.add(new NetworkModule(networkService, settings, false, namedWriteableRegistry));
modules.add(new ScriptModule(this.settings));
modules.add(new NodeEnvironmentModule(nodeEnvironment));
modules.add(new ClusterNameModule(this.settings));
@ -186,7 +188,7 @@ public class Node implements Releasable {
modules.add(new DiscoveryModule(this.settings));
modules.add(new ClusterModule(this.settings));
modules.add(new IndicesModule());
modules.add(new SearchModule());
modules.add(new SearchModule(settings, namedWriteableRegistry));
modules.add(new ActionModule(false));
modules.add(new GatewayModule(settings));
modules.add(new NodeClientModule());

View File

@ -71,7 +71,7 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH
this.headers = headers;
}
private static void copyHeadersAndContext(ActionRequest actionRequest, RestRequest restRequest, Set<String> headers) {
private static void copyHeadersAndContext(ActionRequest<?> actionRequest, RestRequest restRequest, Set<String> headers) {
for (String usefulHeader : headers) {
String headerValue = restRequest.header(usefulHeader);
if (headerValue != null) {
@ -82,7 +82,8 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
protected <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
copyHeadersAndContext(request, restRequest, headers);
super.doExecute(action, request, listener);
}

View File

@ -266,9 +266,9 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell(osStats == null ? null : Short.toString(osStats.getCpu().getPercent()));
boolean hasLoadAverage = osStats != null && osStats.getCpu().getLoadAverage() != null;
table.addCell(!hasLoadAverage ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]));
table.addCell(!hasLoadAverage ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]));
table.addCell(!hasLoadAverage ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]));
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0]));
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1]));
table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2]));
table.addCell(jvmStats == null ? null : jvmStats.getUptime());
table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-");
table.addCell(masterId == null ? "x" : masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-");

View File

@ -19,10 +19,89 @@
package org.elasticsearch.search;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import org.apache.lucene.search.BooleanQuery;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.builders.CircleBuilder;
import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
import org.elasticsearch.common.geo.builders.LineStringBuilder;
import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
import org.elasticsearch.common.geo.builders.MultiPointBuilder;
import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
import org.elasticsearch.common.geo.builders.PointBuilder;
import org.elasticsearch.common.geo.builders.PolygonBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.BoolQueryParser;
import org.elasticsearch.index.query.BoostingQueryParser;
import org.elasticsearch.index.query.CommonTermsQueryParser;
import org.elasticsearch.index.query.ConstantScoreQueryParser;
import org.elasticsearch.index.query.DisMaxQueryParser;
import org.elasticsearch.index.query.EmptyQueryBuilder;
import org.elasticsearch.index.query.ExistsQueryParser;
import org.elasticsearch.index.query.FieldMaskingSpanQueryParser;
import org.elasticsearch.index.query.FuzzyQueryParser;
import org.elasticsearch.index.query.GeoBoundingBoxQueryParser;
import org.elasticsearch.index.query.GeoDistanceQueryParser;
import org.elasticsearch.index.query.GeoDistanceRangeQueryParser;
import org.elasticsearch.index.query.GeoPolygonQueryParser;
import org.elasticsearch.index.query.GeoShapeQueryParser;
import org.elasticsearch.index.query.GeohashCellQuery;
import org.elasticsearch.index.query.HasChildQueryParser;
import org.elasticsearch.index.query.HasParentQueryParser;
import org.elasticsearch.index.query.IdsQueryParser;
import org.elasticsearch.index.query.IndicesQueryParser;
import org.elasticsearch.index.query.MatchAllQueryParser;
import org.elasticsearch.index.query.MatchNoneQueryParser;
import org.elasticsearch.index.query.MatchQueryParser;
import org.elasticsearch.index.query.MoreLikeThisQueryParser;
import org.elasticsearch.index.query.MultiMatchQueryParser;
import org.elasticsearch.index.query.NestedQueryParser;
import org.elasticsearch.index.query.PrefixQueryParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParser;
import org.elasticsearch.index.query.QueryStringQueryParser;
import org.elasticsearch.index.query.RangeQueryParser;
import org.elasticsearch.index.query.RegexpQueryParser;
import org.elasticsearch.index.query.ScriptQueryParser;
import org.elasticsearch.index.query.SimpleQueryStringParser;
import org.elasticsearch.index.query.SpanContainingQueryParser;
import org.elasticsearch.index.query.SpanFirstQueryParser;
import org.elasticsearch.index.query.SpanMultiTermQueryParser;
import org.elasticsearch.index.query.SpanNearQueryParser;
import org.elasticsearch.index.query.SpanNotQueryParser;
import org.elasticsearch.index.query.SpanOrQueryParser;
import org.elasticsearch.index.query.SpanTermQueryParser;
import org.elasticsearch.index.query.SpanWithinQueryParser;
import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.index.query.TermQueryParser;
import org.elasticsearch.index.query.TermsQueryParser;
import org.elasticsearch.index.query.TypeQueryParser;
import org.elasticsearch.index.query.WildcardQueryParser;
import org.elasticsearch.index.query.WrapperQueryParser;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.ScoreFunctionParserMapper;
import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser;
import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.lin.LinearDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.aggregations.AggregationParseElement;
import org.elasticsearch.search.aggregations.AggregationPhase;
@ -149,9 +228,6 @@ import org.elasticsearch.search.query.QueryPhase;
import org.elasticsearch.search.suggest.Suggester;
import org.elasticsearch.search.suggest.Suggesters;
import java.util.HashSet;
import java.util.Set;
/**
*
*/
@ -161,14 +237,35 @@ public class SearchModule extends AbstractModule {
private final Set<Class<? extends PipelineAggregator.Parser>> pipelineAggParsers = new HashSet<>();
private final Highlighters highlighters = new Highlighters();
private final Suggesters suggesters = new Suggesters();
private final Set<Class<? extends ScoreFunctionParser>> functionScoreParsers = new HashSet<>();
/**
* Function score parsers, constructed at registration time. This is fine
* because they have no dependencies.
*/
private final Map<String, ScoreFunctionParser<?>> functionScoreParsers = new HashMap<>();
/**
* Query parsers, constructed at configure time. Construction has to be
* deferred until then because query parsers can depend on components that
* plugins register (such as function score parsers).
*/
private final List<Supplier<QueryParser<?>>> queryParsers = new ArrayList<>();
private final Set<Class<? extends FetchSubPhase>> fetchSubPhases = new HashSet<>();
private final Set<Class<? extends SignificanceHeuristicParser>> heuristicParsers = new HashSet<>();
private final Set<Class<? extends MovAvgModel.AbstractModelParser>> modelParsers = new HashSet<>();
private final Settings settings;
private final NamedWriteableRegistry namedWriteableRegistry;
// pkg private so tests can mock
Class<? extends SearchService> searchServiceImpl = SearchService.class;
public SearchModule(Settings settings, NamedWriteableRegistry namedWriteableRegistry) {
this.settings = settings;
this.namedWriteableRegistry = namedWriteableRegistry;
registerBuiltinFunctionScoreParsers();
registerBuiltinQueryParsers();
}
public void registerHighlighter(String key, Class<? extends Highlighter> clazz) {
highlighters.registerExtension(key, clazz);
}
@ -177,8 +274,21 @@ public class SearchModule extends AbstractModule {
suggesters.registerExtension(key, suggester);
}
public void registerFunctionScoreParser(Class<? extends ScoreFunctionParser> parser) {
functionScoreParsers.add(parser);
/**
* Register a new ScoreFunctionParser.
*/
public void registerFunctionScoreParser(ScoreFunctionParser<?> parser) {
for (String name: parser.getNames()) {
Object oldValue = functionScoreParsers.putIfAbsent(name, parser);
if (oldValue != null) {
throw new IllegalArgumentException("Function score parser [" + oldValue + "] already registered for name [" + name + "]");
}
}
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, parser.getBuilderPrototype());
}
public void registerQueryParser(Supplier<QueryParser<?>> parser) {
queryParsers.add(parser);
}
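The two hooks deliberately differ in construction time: function score parsers are instantiated eagerly at registration, while query parsers are handed over as suppliers and only constructed during configure(). A rough sketch of a plugin using both, where MyScoreFunctionParser and MyQueryParser are hypothetical types implementing ScoreFunctionParser and QueryParser:

    // Eager: the parser instance exists as soon as it is registered.
    searchModule.registerFunctionScoreParser(new MyScoreFunctionParser());
    // Deferred: the supplier runs at configure time, after every plugin has
    // had the chance to register its function score parsers.
    searchModule.registerQueryParser(MyQueryParser::new);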
public void registerFetchSubPhase(Class<? extends FetchSubPhase> subPhase) {
@ -212,8 +322,9 @@ public class SearchModule extends AbstractModule {
configureAggs();
configureHighlighters();
configureSuggesters();
configureFunctionScore();
bind(IndicesQueriesRegistry.class).toInstance(buildQueryParserRegistry());
configureFetchSubPhase();
configureShapes();
}
protected void configureFetchSubPhase() {
@ -231,16 +342,23 @@ public class SearchModule extends AbstractModule {
bind(InnerHitsFetchSubPhase.class).asEagerSingleton();
}
protected void configureSuggesters() {
suggesters.bind(binder());
public IndicesQueriesRegistry buildQueryParserRegistry() {
Map<String, QueryParser<?>> queryParsersMap = new HashMap<>();
for (Supplier<QueryParser<?>> parserSupplier : queryParsers) {
QueryParser<?> parser = parserSupplier.get();
for (String name: parser.names()) {
Object oldValue = queryParsersMap.putIfAbsent(name, parser);
if (oldValue != null) {
throw new IllegalArgumentException("Query parser [" + oldValue + "] already registered for name [" + name + "] while trying to register [" + parser + "]");
}
}
namedWriteableRegistry.registerPrototype(QueryBuilder.class, parser.getBuilderPrototype());
}
return new IndicesQueriesRegistry(settings, queryParsersMap);
}
protected void configureFunctionScore() {
Multibinder<ScoreFunctionParser> parserMapBinder = Multibinder.newSetBinder(binder(), ScoreFunctionParser.class);
for (Class<? extends ScoreFunctionParser> clazz : functionScoreParsers) {
parserMapBinder.addBinding().to(clazz);
}
bind(ScoreFunctionParserMapper.class).asEagerSingleton();
protected void configureSuggesters() {
suggesters.bind(binder());
}
protected void configureHighlighters() {
@ -332,6 +450,87 @@ public class SearchModule extends AbstractModule {
}
}
private void configureShapes() {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
}
}
private void registerBuiltinFunctionScoreParsers() {
registerFunctionScoreParser(new ScriptScoreFunctionParser());
registerFunctionScoreParser(new GaussDecayFunctionParser());
registerFunctionScoreParser(new LinearDecayFunctionParser());
registerFunctionScoreParser(new ExponentialDecayFunctionParser());
registerFunctionScoreParser(new RandomScoreFunctionParser());
registerFunctionScoreParser(new FieldValueFactorFunctionParser());
// Weight doesn't have its own parser, so every function supports it out of the box.
// It can also stand alone when not associated with any other function, which is
// why it needs to be registered manually here.
namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, new WeightBuilder());
}
private void registerBuiltinQueryParsers() {
registerQueryParser(MatchQueryParser::new);
registerQueryParser(MultiMatchQueryParser::new);
registerQueryParser(NestedQueryParser::new);
registerQueryParser(HasChildQueryParser::new);
registerQueryParser(HasParentQueryParser::new);
registerQueryParser(DisMaxQueryParser::new);
registerQueryParser(IdsQueryParser::new);
registerQueryParser(MatchAllQueryParser::new);
registerQueryParser(QueryStringQueryParser::new);
registerQueryParser(BoostingQueryParser::new);
BooleanQuery.setMaxClauseCount(settings.getAsInt("index.query.bool.max_clause_count", settings.getAsInt("indices.query.bool.max_clause_count", BooleanQuery.getMaxClauseCount())));
registerQueryParser(BoolQueryParser::new);
registerQueryParser(TermQueryParser::new);
registerQueryParser(TermsQueryParser::new);
registerQueryParser(FuzzyQueryParser::new);
registerQueryParser(RegexpQueryParser::new);
registerQueryParser(RangeQueryParser::new);
registerQueryParser(PrefixQueryParser::new);
registerQueryParser(WildcardQueryParser::new);
registerQueryParser(ConstantScoreQueryParser::new);
registerQueryParser(SpanTermQueryParser::new);
registerQueryParser(SpanNotQueryParser::new);
registerQueryParser(SpanWithinQueryParser::new);
registerQueryParser(SpanContainingQueryParser::new);
registerQueryParser(FieldMaskingSpanQueryParser::new);
registerQueryParser(SpanFirstQueryParser::new);
registerQueryParser(SpanNearQueryParser::new);
registerQueryParser(SpanOrQueryParser::new);
registerQueryParser(MoreLikeThisQueryParser::new);
registerQueryParser(WrapperQueryParser::new);
registerQueryParser(IndicesQueryParser::new);
registerQueryParser(CommonTermsQueryParser::new);
registerQueryParser(SpanMultiTermQueryParser::new);
// Construction is delayed until configure time to give plugins a chance to register their function score parsers first
registerQueryParser(() -> new FunctionScoreQueryParser(new ScoreFunctionParserMapper(functionScoreParsers)));
registerQueryParser(SimpleQueryStringParser::new);
registerQueryParser(TemplateQueryParser::new);
registerQueryParser(TypeQueryParser::new);
registerQueryParser(ScriptQueryParser::new);
registerQueryParser(GeoDistanceQueryParser::new);
registerQueryParser(GeoDistanceRangeQueryParser::new);
registerQueryParser(GeoBoundingBoxQueryParser::new);
registerQueryParser(GeohashCellQuery.Parser::new);
registerQueryParser(GeoPolygonQueryParser::new);
registerQueryParser(ExistsQueryParser::new);
registerQueryParser(MatchNoneQueryParser::new);
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerQueryParser(GeoShapeQueryParser::new);
}
// EmptyQueryBuilder is not registered as a query parser but is used internally.
// We need to register it with the NamedWriteableRegistry in order to serialize it.
namedWriteableRegistry.registerPrototype(QueryBuilder.class, EmptyQueryBuilder.PROTOTYPE);
}
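Registering prototypes is what lets the transport layer rebuild a concrete builder from a stream by name. Conceptually, and assuming the usual named-writeable-aware stream wrappers are in place (a sketch, not the exact wiring of this commit):

    // Writing: the builder's name is serialized alongside its payload.
    out.writeNamedWriteable(queryBuilder);
    // Reading: the registry resolves the prototype for the name, and its
    // readFrom() produces the concrete QueryBuilder instance.
    QueryBuilder restored = in.readNamedWriteable(QueryBuilder.class);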
static {
// calcs
InternalAvg.registerStreams();
@ -393,5 +592,4 @@ public class SearchModule extends AbstractModule {
BucketSelectorPipelineAggregator.registerStreams();
SerialDiffPipelineAggregator.registerStreams();
}
}

View File

@ -62,7 +62,8 @@ public class GeoHashGridParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser vsParser = ValuesSourceParser.geoPoint(aggregationName, InternalGeoHashGrid.TYPE, context).build();
ValuesSourceParser<ValuesSource.GeoPoint> vsParser = ValuesSourceParser.geoPoint(aggregationName, InternalGeoHashGrid.TYPE, context)
.build();
int precision = GeoHashGridParams.DEFAULT_PRECISION;
int requiredSize = GeoHashGridParams.DEFAULT_MAX_NUM_CELLS;
@ -131,6 +132,7 @@ public class GeoHashGridParser implements Aggregator.Parser {
final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize,
Collections.<InternalGeoHashGrid.Bucket> emptyList(), pipelineAggregators, metaData);
return new NonCollectingAggregator(name, aggregationContext, parent, pipelineAggregators, metaData) {
@Override
public InternalAggregation buildEmptyAggregation() {
return aggregation;
}

View File

@ -28,6 +28,7 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -78,7 +79,7 @@ public class DateHistogramParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser vsParser = ValuesSourceParser.numeric(aggregationName, InternalDateHistogram.TYPE, context)
ValuesSourceParser<Numeric> vsParser = ValuesSourceParser.numeric(aggregationName, InternalDateHistogram.TYPE, context)
.targetValueType(ValueType.DATE)
.formattable(true)
.timezoneAware(true)
@ -190,7 +191,7 @@ public class DateHistogramParser implements Aggregator.Parser {
.timeZone(vsParser.input().timezone())
.offset(offset).build();
ValuesSourceConfig config = vsParser.config();
ValuesSourceConfig<Numeric> config = vsParser.config();
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, extendedBounds,
new InternalDateHistogram.Factory());

View File

@ -25,6 +25,7 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.aggregations.support.format.ValueParser;
import org.elasticsearch.search.internal.SearchContext;
@ -46,7 +47,7 @@ public class HistogramParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser vsParser = ValuesSourceParser.numeric(aggregationName, InternalHistogram.TYPE, context)
ValuesSourceParser<Numeric> vsParser = ValuesSourceParser.numeric(aggregationName, InternalHistogram.TYPE, context)
.targetValueType(ValueType.NUMERIC)
.formattable(true)
.build();
@ -127,7 +128,7 @@ public class HistogramParser implements Aggregator.Parser {
Rounding rounding = new Rounding.Interval(interval);
if (offset != 0) {
rounding = new Rounding.OffsetRounding((Rounding.Interval) rounding, offset);
rounding = new Rounding.OffsetRounding(rounding, offset);
}
if (extendedBounds != null) {
@ -136,7 +137,7 @@ public class HistogramParser implements Aggregator.Parser {
}
return new HistogramAggregator.Factory(aggregationName, vsParser.config(), rounding, order, keyed, minDocCount, extendedBounds,
new InternalHistogram.Factory());
new InternalHistogram.Factory<>());
}

View File

@ -81,9 +81,9 @@ public class MissingAggregator extends SingleBucketAggregator {
return new InternalMissing(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData());
}
public static class Factory extends ValuesSourceAggregatorFactory<ValuesSource> {
public Factory(String name, ValuesSourceConfig valueSourceConfig) {
public Factory(String name, ValuesSourceConfig<ValuesSource> valueSourceConfig) {
super(name, InternalMissing.TYPE.name(), valueSourceConfig);
}

View File

@ -22,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -39,8 +40,7 @@ public class MissingParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, InternalMissing.TYPE, context)
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, InternalMissing.TYPE, context)
.scriptable(false)
.build();

View File

@ -203,7 +203,8 @@ public class SamplerAggregator extends SingleBucketAggregator {
private int maxDocsPerValue;
private String executionHint;
public DiversifiedFactory(String name, int shardSize, String executionHint, ValuesSourceConfig vsConfig, int maxDocsPerValue) {
public DiversifiedFactory(String name, int shardSize, String executionHint, ValuesSourceConfig<ValuesSource> vsConfig,
int maxDocsPerValue) {
super(name, InternalSampler.TYPE.name(), vsConfig);
this.shardSize = shardSize;
this.maxDocsPerValue = maxDocsPerValue;

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -55,10 +56,10 @@ public class SamplerParser implements Aggregator.Parser {
String executionHint = null;
int shardSize = DEFAULT_SHARD_SAMPLE_SIZE;
int maxDocsPerValue = MAX_DOCS_PER_VALUE_DEFAULT;
ValuesSourceParser vsParser = null;
boolean diversityChoiceMade = false;
vsParser = ValuesSourceParser.any(aggregationName, InternalSampler.TYPE, context).scriptable(true).formattable(false).build();
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, InternalSampler.TYPE, context).scriptable(true)
.formattable(false).build();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@ -88,7 +89,7 @@ public class SamplerParser implements Aggregator.Parser {
}
}
ValuesSourceConfig vsConfig = vsParser.config();
ValuesSourceConfig<ValuesSource> vsConfig = vsParser.config();
if (vsConfig.valid()) {
return new SamplerAggregator.DiversifiedFactory(aggregationName, shardSize, executionHint, vsConfig, maxDocsPerValue);
} else {

View File

@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.significant;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
@ -80,8 +79,6 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) valuesSource;
IndexSearcher indexSearcher = aggregationContext.searchContext().searcher();
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsSignificantTermsAggregator(name, factories,
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext,
@ -98,9 +95,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter,
aggregationContext,
parent, termsAggregatorFactory, pipelineAggregators, metaData);
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext, parent,
termsAggregatorFactory, pipelineAggregators, metaData);
}
};
@ -143,7 +139,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
return new TermsAggregator.BucketCountThresholds(bucketCountThresholds);
}
public SignificantTermsAggregatorFactory(String name, ValuesSourceConfig valueSourceConfig, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
public SignificantTermsAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> valueSourceConfig, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
String executionHint, Query filter, SignificanceHeuristic significanceHeuristic) {
super(name, SignificantStringTerms.TYPE.name(), valueSourceConfig);

View File

@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParserMapper;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -53,7 +54,7 @@ public class SignificantTermsParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
SignificantTermsParametersParser aggParser = new SignificantTermsParametersParser(significanceHeuristicParserMapper);
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, SignificantStringTerms.TYPE, context)
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, SignificantStringTerms.TYPE, context)
.scriptable(false)
.formattable(true)
.build();

View File

@ -36,13 +36,13 @@ public abstract class AbstractTermsParametersParser {
public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count");
public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");
public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error");
//These are the results of the parsing.
private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds();
private String executionHint = null;
private SubAggCollectionMode collectMode = SubAggCollectionMode.DEPTH_FIRST;
@ -59,12 +59,12 @@ public abstract class AbstractTermsParametersParser {
public IncludeExclude getIncludeExclude() {
return includeExclude;
}
public SubAggCollectionMode getCollectionMode() {
return collectMode;
}
public void parse(String aggregationName, XContentParser parser, SearchContext context, ValuesSourceParser vsParser, IncludeExclude.Parser incExcParser) throws IOException {
public void parse(String aggregationName, XContentParser parser, SearchContext context, ValuesSourceParser<?> vsParser, IncludeExclude.Parser incExcParser) throws IOException {
bucketCountThresholds = getDefaultBucketCountThresholds();
XContentParser.Token token;
String currentFieldName = null;

View File

@ -165,7 +165,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
private final boolean showTermDocCountError;
public TermsAggregatorFactory(String name, ValuesSourceConfig config, Terms.Order order,
public TermsAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, Terms.Order order,
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, String executionHint,
SubAggCollectionMode executionMode, boolean showTermDocCountError) {
super(name, StringTerms.TYPE.name(), config);

View File

@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.bucket.BucketUtils;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.bucket.terms.TermsParametersParser.OrderElement;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -45,7 +46,8 @@ public class TermsParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
TermsParametersParser aggParser = new TermsParametersParser();
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context).scriptable(true).formattable(true).build();
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context).scriptable(true)
.formattable(true).build();
IncludeExclude.Parser incExcParser = new IncludeExclude.Parser();
aggParser.parse(aggregationName, parser, context, vsParser, incExcParser);

View File

@ -62,6 +62,7 @@ public abstract class ValuesSourceMetricsAggregationBuilder<B extends ValuesSour
/**
* Configure the value to use when documents miss a value.
*/
@SuppressWarnings("unchecked")
public B missing(Object missingValue) {
this.missing = missingValue;
return (B) this;
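For example, a caller can substitute a default for documents that lack the field; a minimal usage sketch with one of the metrics builders (field name and value are illustrative):

    // Documents without a "price" field contribute 0 to the average.
    AvgBuilder avg = new AvgBuilder("avg_price")
            .field("price")
            .missing(0);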

View File

@ -35,7 +35,7 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory.L
private final long precisionThreshold;
CardinalityAggregatorFactory(String name, ValuesSourceConfig config, long precisionThreshold) {
CardinalityAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, long precisionThreshold) {
super(name, InternalCardinality.TYPE.name(), config);
this.precisionThreshold = precisionThreshold;
}

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -43,7 +44,7 @@ public class CardinalityParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String name, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<?> vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
long precisionThreshold = -1;

View File

@ -40,7 +40,7 @@ public class ValueCountParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, InternalValueCount.TYPE, context)
ValuesSourceParser<?> vsParser = ValuesSourceParser.any(aggregationName, InternalValueCount.TYPE, context)
.build();
XContentParser.Token token;
@ -54,6 +54,6 @@ public class ValueCountParser implements Aggregator.Parser {
}
}
return new ValueCountAggregator.Factory(aggregationName, vsParser.config());
return new ValueCountAggregator.Factory<>(aggregationName, vsParser.config());
}
}

View File

@ -53,6 +53,9 @@ import org.elasticsearch.search.aggregations.support.values.ScriptLongValues;
import java.io.IOException;
/**
* How to load values for an aggregation.
*/
public abstract class ValuesSource {
/**
@ -528,6 +531,7 @@ public abstract class ValuesSource {
return indexFieldData.load(context).getBytesValues();
}
@Override
public org.elasticsearch.index.fielddata.MultiGeoPointValues geoPointValues(LeafReaderContext context) {
return indexFieldData.load(context).getGeoPointValues();
}

Some files were not shown because too many files have changed in this diff.