Add Force Merge API, deprecate Optimize API
This adds an API for force merging lucene segments. The `/_optimize` API is now deprecated and replaced by the `/_forcemerge` API, which has all the same flags and action, just a different name.
This commit is contained in:
parent
a0668a3b2b
commit
9ea4909035
|
@ -80,6 +80,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TransportTypesExistsA
|
|||
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushAction;
|
||||
import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
|
||||
import org.elasticsearch.action.admin.indices.get.TransportGetIndexAction;
|
||||
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction;
|
||||
|
@ -91,8 +93,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
|
|||
import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
|
||||
import org.elasticsearch.action.admin.indices.open.TransportOpenIndexAction;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeAction;
|
||||
import org.elasticsearch.action.admin.indices.optimize.TransportOptimizeAction;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
|
||||
import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
|
||||
|
@ -295,7 +295,7 @@ public class ActionModule extends AbstractModule {
|
|||
registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
|
||||
registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
|
||||
registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
|
||||
registerAction(OptimizeAction.INSTANCE, TransportOptimizeAction.class);
|
||||
registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
|
||||
registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
|
||||
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
|
||||
registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
|
||||
|
|
|
@ -17,29 +17,29 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class OptimizeAction extends Action<OptimizeRequest, OptimizeResponse, OptimizeRequestBuilder> {
|
||||
public class ForceMergeAction extends Action<ForceMergeRequest, ForceMergeResponse, ForceMergeRequestBuilder> {
|
||||
|
||||
public static final OptimizeAction INSTANCE = new OptimizeAction();
|
||||
public static final String NAME = "indices:admin/optimize";
|
||||
public static final ForceMergeAction INSTANCE = new ForceMergeAction();
|
||||
public static final String NAME = "indices:admin/forcemerge";
|
||||
|
||||
private OptimizeAction() {
|
||||
private ForceMergeAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptimizeResponse newResponse() {
|
||||
return new OptimizeResponse();
|
||||
public ForceMergeResponse newResponse() {
|
||||
return new ForceMergeResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptimizeRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new OptimizeRequestBuilder(client, this);
|
||||
public ForceMergeRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ForceMergeRequestBuilder(client, this);
|
||||
}
|
||||
}
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -26,17 +26,18 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* A request to optimize one or more indices. In order to optimize on all the indices, pass an empty array or
|
||||
* <tt>null</tt> for the indices.
|
||||
* <p>
|
||||
* {@link #maxNumSegments(int)} allows to control the number of segments to optimize down to. By default, will
|
||||
* cause the optimize process to optimize down to half the configured number of segments.
|
||||
* A request to force merging the segments of one or more indices. In order to
|
||||
* run a merge on all the indices, pass an empty array or <tt>null</tt> for the
|
||||
* indices.
|
||||
* {@link #maxNumSegments(int)} allows to control the number of segments
|
||||
* to force merge down to. Defaults to simply checking if a merge needs
|
||||
* to execute, and if so, executes it
|
||||
*
|
||||
* @see org.elasticsearch.client.Requests#optimizeRequest(String...)
|
||||
* @see org.elasticsearch.client.IndicesAdminClient#optimize(OptimizeRequest)
|
||||
* @see OptimizeResponse
|
||||
* @see org.elasticsearch.client.Requests#forceMergeRequest(String...)
|
||||
* @see org.elasticsearch.client.IndicesAdminClient#forceMerge(ForceMergeRequest)
|
||||
* @see ForceMergeResponse
|
||||
*/
|
||||
public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
|
||||
public class ForceMergeRequest extends BroadcastRequest<ForceMergeRequest> {
|
||||
|
||||
public static final class Defaults {
|
||||
public static final int MAX_NUM_SEGMENTS = -1;
|
||||
|
@ -49,63 +50,63 @@ public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
|
|||
private boolean flush = Defaults.FLUSH;
|
||||
|
||||
/**
|
||||
* Constructs an optimization request over one or more indices.
|
||||
* Constructs a merge request over one or more indices.
|
||||
*
|
||||
* @param indices The indices to optimize, no indices passed means all indices will be optimized.
|
||||
* @param indices The indices to merge, no indices passed means all indices will be merged.
|
||||
*/
|
||||
public OptimizeRequest(String... indices) {
|
||||
public ForceMergeRequest(String... indices) {
|
||||
super(indices);
|
||||
}
|
||||
|
||||
public OptimizeRequest() {
|
||||
public ForceMergeRequest() {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Will optimize the index down to <= maxNumSegments. By default, will cause the optimize
|
||||
* process to optimize down to half the configured number of segments.
|
||||
* Will merge the index down to <= maxNumSegments. By default, will cause the merge
|
||||
* process to merge down to half the configured number of segments.
|
||||
*/
|
||||
public int maxNumSegments() {
|
||||
return maxNumSegments;
|
||||
}
|
||||
|
||||
/**
|
||||
* Will optimize the index down to <= maxNumSegments. By default, will cause the optimize
|
||||
* process to optimize down to half the configured number of segments.
|
||||
* Will merge the index down to <= maxNumSegments. By default, will cause the merge
|
||||
* process to merge down to half the configured number of segments.
|
||||
*/
|
||||
public OptimizeRequest maxNumSegments(int maxNumSegments) {
|
||||
public ForceMergeRequest maxNumSegments(int maxNumSegments) {
|
||||
this.maxNumSegments = maxNumSegments;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the optimization only expunge deletes from the index, without full optimization.
|
||||
* Defaults to full optimization (<tt>false</tt>).
|
||||
* Should the merge only expunge deletes from the index, without full merging.
|
||||
* Defaults to full merging (<tt>false</tt>).
|
||||
*/
|
||||
public boolean onlyExpungeDeletes() {
|
||||
return onlyExpungeDeletes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the optimization only expunge deletes from the index, without full optimization.
|
||||
* Defaults to full optimization (<tt>false</tt>).
|
||||
* Should the merge only expunge deletes from the index, without full merge.
|
||||
* Defaults to full merging (<tt>false</tt>).
|
||||
*/
|
||||
public OptimizeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) {
|
||||
public ForceMergeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) {
|
||||
this.onlyExpungeDeletes = onlyExpungeDeletes;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should flush be performed after the optimization. Defaults to <tt>true</tt>.
|
||||
* Should flush be performed after the merge. Defaults to <tt>true</tt>.
|
||||
*/
|
||||
public boolean flush() {
|
||||
return flush;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should flush be performed after the optimization. Defaults to <tt>true</tt>.
|
||||
* Should flush be performed after the merge. Defaults to <tt>true</tt>.
|
||||
*/
|
||||
public OptimizeRequest flush(boolean flush) {
|
||||
public ForceMergeRequest flush(boolean flush) {
|
||||
this.flush = flush;
|
||||
return this;
|
||||
}
|
||||
|
@ -128,7 +129,7 @@ public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "OptimizeRequest{" +
|
||||
return "ForceMergeRequest{" +
|
||||
"maxNumSegments=" + maxNumSegments +
|
||||
", onlyExpungeDeletes=" + onlyExpungeDeletes +
|
||||
", flush=" + flush +
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* A request to force merge one or more indices. In order to force merge all
|
||||
* indices, pass an empty array or <tt>null</tt> for the indices.
|
||||
* {@link #setMaxNumSegments(int)} allows to control the number of segments to force
|
||||
* merge down to. By default, will cause the force merge process to merge down
|
||||
* to half the configured number of segments.
|
||||
*/
|
||||
public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder<ForceMergeRequest, ForceMergeResponse, ForceMergeRequestBuilder> {
|
||||
|
||||
public ForceMergeRequestBuilder(ElasticsearchClient client, ForceMergeAction action) {
|
||||
super(client, action, new ForceMergeRequest());
|
||||
}
|
||||
|
||||
/**
|
||||
* Will force merge the index down to <= maxNumSegments. By default, will
|
||||
* cause the merge process to merge down to half the configured number of
|
||||
* segments.
|
||||
*/
|
||||
public ForceMergeRequestBuilder setMaxNumSegments(int maxNumSegments) {
|
||||
request.maxNumSegments(maxNumSegments);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the merge only expunge deletes from the index, without full merging.
|
||||
* Defaults to full merging (<tt>false</tt>).
|
||||
*/
|
||||
public ForceMergeRequestBuilder setOnlyExpungeDeletes(boolean onlyExpungeDeletes) {
|
||||
request.onlyExpungeDeletes(onlyExpungeDeletes);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should flush be performed after the merge. Defaults to <tt>true</tt>.
|
||||
*/
|
||||
public ForceMergeRequestBuilder setFlush(boolean flush) {
|
||||
request.flush(flush);
|
||||
return this;
|
||||
}
|
||||
}
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
|
||||
|
@ -28,27 +28,14 @@ import java.io.IOException;
|
|||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A response for optimize action.
|
||||
*
|
||||
*
|
||||
* A response for force merge action.
|
||||
*/
|
||||
public class OptimizeResponse extends BroadcastResponse {
|
||||
|
||||
OptimizeResponse() {
|
||||
public class ForceMergeResponse extends BroadcastResponse {
|
||||
|
||||
ForceMergeResponse() {
|
||||
}
|
||||
|
||||
OptimizeResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
|
||||
ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
|
||||
super(totalShards, successfulShards, failedShards, shardFailures);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
|
@ -41,18 +41,18 @@ import java.io.IOException;
|
|||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Optimize index/indices action.
|
||||
* ForceMerge index/indices action.
|
||||
*/
|
||||
public class TransportOptimizeAction extends TransportBroadcastByNodeAction<OptimizeRequest, OptimizeResponse, TransportBroadcastByNodeAction.EmptyResult> {
|
||||
public class TransportForceMergeAction extends TransportBroadcastByNodeAction<ForceMergeRequest, ForceMergeResponse, TransportBroadcastByNodeAction.EmptyResult> {
|
||||
|
||||
private final IndicesService indicesService;
|
||||
|
||||
@Inject
|
||||
public TransportOptimizeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
public TransportForceMergeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, IndicesService indicesService,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, OptimizeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
|
||||
OptimizeRequest::new, ThreadPool.Names.OPTIMIZE);
|
||||
super(settings, ForceMergeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
|
||||
ForceMergeRequest::new, ThreadPool.Names.FORCE_MERGE);
|
||||
this.indicesService = indicesService;
|
||||
}
|
||||
|
||||
|
@ -62,21 +62,21 @@ public class TransportOptimizeAction extends TransportBroadcastByNodeAction<Opti
|
|||
}
|
||||
|
||||
@Override
|
||||
protected OptimizeResponse newResponse(OptimizeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
|
||||
return new OptimizeResponse(totalShards, successfulShards, failedShards, shardFailures);
|
||||
protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
|
||||
return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected OptimizeRequest readRequestFrom(StreamInput in) throws IOException {
|
||||
final OptimizeRequest request = new OptimizeRequest();
|
||||
protected ForceMergeRequest readRequestFrom(StreamInput in) throws IOException {
|
||||
final ForceMergeRequest request = new ForceMergeRequest();
|
||||
request.readFrom(in);
|
||||
return request;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EmptyResult shardOperation(OptimizeRequest request, ShardRouting shardRouting) throws IOException {
|
||||
protected EmptyResult shardOperation(ForceMergeRequest request, ShardRouting shardRouting) throws IOException {
|
||||
IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).getShard(shardRouting.shardId().id());
|
||||
indexShard.optimize(request);
|
||||
indexShard.forceMerge(request);
|
||||
return EmptyResult.INSTANCE;
|
||||
}
|
||||
|
||||
|
@ -84,17 +84,17 @@ public class TransportOptimizeAction extends TransportBroadcastByNodeAction<Opti
|
|||
* The refresh request works against *all* shards.
|
||||
*/
|
||||
@Override
|
||||
protected ShardsIterator shards(ClusterState clusterState, OptimizeRequest request, String[] concreteIndices) {
|
||||
protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) {
|
||||
return clusterState.routingTable().allShards(concreteIndices);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkGlobalBlock(ClusterState state, OptimizeRequest request) {
|
||||
protected ClusterBlockException checkGlobalBlock(ClusterState state, ForceMergeRequest request) {
|
||||
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterBlockException checkRequestBlock(ClusterState state, OptimizeRequest request, String[] concreteIndices) {
|
||||
protected ClusterBlockException checkRequestBlock(ClusterState state, ForceMergeRequest request, String[] concreteIndices) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
|
||||
}
|
||||
}
|
|
@ -18,6 +18,6 @@
|
|||
*/
|
||||
|
||||
/**
|
||||
* Optimize index/indices action.
|
||||
* Force merge index/indices action.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
|
@ -1,62 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* A request to optimize one or more indices. In order to optimize on all the indices, pass an empty array or
|
||||
* <tt>null</tt> for the indices.
|
||||
* <p>{@link #setMaxNumSegments(int)} allows to control the number of segments to optimize down to. By default, will
|
||||
* cause the optimize process to optimize down to half the configured number of segments.
|
||||
*/
|
||||
public class OptimizeRequestBuilder extends BroadcastOperationRequestBuilder<OptimizeRequest, OptimizeResponse, OptimizeRequestBuilder> {
|
||||
|
||||
public OptimizeRequestBuilder(ElasticsearchClient client, OptimizeAction action) {
|
||||
super(client, action, new OptimizeRequest());
|
||||
}
|
||||
|
||||
/**
|
||||
* Will optimize the index down to <= maxNumSegments. By default, will cause the optimize
|
||||
* process to optimize down to half the configured number of segments.
|
||||
*/
|
||||
public OptimizeRequestBuilder setMaxNumSegments(int maxNumSegments) {
|
||||
request.maxNumSegments(maxNumSegments);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should the optimization only expunge deletes from the index, without full optimization.
|
||||
* Defaults to full optimization (<tt>false</tt>).
|
||||
*/
|
||||
public OptimizeRequestBuilder setOnlyExpungeDeletes(boolean onlyExpungeDeletes) {
|
||||
request.onlyExpungeDeletes(onlyExpungeDeletes);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should flush be performed after the optimization. Defaults to <tt>true</tt>.
|
||||
*/
|
||||
public OptimizeRequestBuilder setFlush(boolean flush) {
|
||||
request.flush(flush);
|
||||
return this;
|
||||
}
|
||||
}
|
|
@ -64,7 +64,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
|
|||
public TransportUpgradeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, TransportUpgradeSettingsAction upgradeSettingsAction) {
|
||||
super(settings, UpgradeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpgradeRequest::new, ThreadPool.Names.OPTIMIZE);
|
||||
super(settings, UpgradeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpgradeRequest::new, ThreadPool.Names.FORCE_MERGE);
|
||||
this.indicesService = indicesService;
|
||||
this.upgradeSettingsAction = upgradeSettingsAction;
|
||||
}
|
||||
|
|
|
@ -53,6 +53,9 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
|
|||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
|
||||
|
@ -63,9 +66,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
|||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
|
||||
|
@ -391,28 +391,27 @@ public interface IndicesAdminClient extends ElasticsearchClient {
|
|||
FlushRequestBuilder prepareFlush(String... indices);
|
||||
|
||||
/**
|
||||
* Explicitly optimize one or more indices into a the number of segments.
|
||||
* Explicitly force merge one or more indices into a the number of segments.
|
||||
*
|
||||
* @param request The optimize request
|
||||
* @return A result future
|
||||
* @see org.elasticsearch.client.Requests#optimizeRequest(String...)
|
||||
* @see org.elasticsearch.client.Requests#forceMergeRequest(String...)
|
||||
*/
|
||||
ActionFuture<OptimizeResponse> optimize(OptimizeRequest request);
|
||||
ActionFuture<ForceMergeResponse> forceMerge(ForceMergeRequest request);
|
||||
|
||||
/**
|
||||
* Explicitly optimize one or more indices into a the number of segments.
|
||||
* Explicitly force merge one or more indices into a the number of segments.
|
||||
*
|
||||
* @param request The optimize request
|
||||
* @param request The force merge request
|
||||
* @param listener A listener to be notified with a result
|
||||
* @see org.elasticsearch.client.Requests#optimizeRequest(String...)
|
||||
* @see org.elasticsearch.client.Requests#forceMergeRequest(String...)
|
||||
*/
|
||||
void optimize(OptimizeRequest request, ActionListener<OptimizeResponse> listener);
|
||||
void forceMerge(ForceMergeRequest request, ActionListener<ForceMergeResponse> listener);
|
||||
|
||||
/**
|
||||
* Explicitly optimize one or more indices into a the number of segments.
|
||||
* Explicitly force mergee one or more indices into a the number of segments.
|
||||
*/
|
||||
OptimizeRequestBuilder prepareOptimize(String... indices);
|
||||
|
||||
ForceMergeRequestBuilder prepareForceMerge(String... indices);
|
||||
|
||||
/**
|
||||
* Explicitly upgrade one or more indices
|
||||
|
|
|
@ -45,7 +45,7 @@ import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsReques
|
|||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
|
||||
|
@ -279,14 +279,14 @@ public class Requests {
|
|||
}
|
||||
|
||||
/**
|
||||
* Creates an optimize request.
|
||||
* Creates a force merge request.
|
||||
*
|
||||
* @param indices The indices to optimize. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
|
||||
* @return The optimize request
|
||||
* @see org.elasticsearch.client.IndicesAdminClient#optimize(org.elasticsearch.action.admin.indices.optimize.OptimizeRequest)
|
||||
* @param indices The indices to force merge. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
|
||||
* @return The force merge request
|
||||
* @see org.elasticsearch.client.IndicesAdminClient#forceMerge(org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest)
|
||||
*/
|
||||
public static OptimizeRequest optimizeRequest(String... indices) {
|
||||
return new OptimizeRequest(indices);
|
||||
public static ForceMergeRequest forceMergeRequest(String... indices) {
|
||||
return new ForceMergeRequest(indices);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -139,6 +139,10 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction;
|
|||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
|
||||
|
@ -152,10 +156,6 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
|
|||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeAction;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
|
||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequestBuilder;
|
||||
|
@ -1382,18 +1382,18 @@ public abstract class AbstractClient extends AbstractComponent implements Client
|
|||
}
|
||||
|
||||
@Override
|
||||
public ActionFuture<OptimizeResponse> optimize(final OptimizeRequest request) {
|
||||
return execute(OptimizeAction.INSTANCE, request);
|
||||
public ActionFuture<ForceMergeResponse> forceMerge(final ForceMergeRequest request) {
|
||||
return execute(ForceMergeAction.INSTANCE, request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void optimize(final OptimizeRequest request, final ActionListener<OptimizeResponse> listener) {
|
||||
execute(OptimizeAction.INSTANCE, request, listener);
|
||||
public void forceMerge(final ForceMergeRequest request, final ActionListener<ForceMergeResponse> listener) {
|
||||
execute(ForceMergeAction.INSTANCE, request, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptimizeRequestBuilder prepareOptimize(String... indices) {
|
||||
return new OptimizeRequestBuilder(this, OptimizeAction.INSTANCE).setIndices(indices);
|
||||
public ForceMergeRequestBuilder prepareForceMerge(String... indices) {
|
||||
return new ForceMergeRequestBuilder(this, ForceMergeAction.INSTANCE).setIndices(indices);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -246,7 +246,7 @@ public final class EngineConfig {
|
|||
|
||||
/**
|
||||
* Returns a thread-pool mainly used to get estimated time stamps from {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule
|
||||
* async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#OPTIMIZE} thread-pool
|
||||
* async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool
|
||||
*/
|
||||
public ThreadPool getThreadPool() {
|
||||
return threadPool;
|
||||
|
|
|
@ -29,7 +29,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
|
||||
import org.elasticsearch.action.termvectors.TermVectorsRequest;
|
||||
import org.elasticsearch.action.termvectors.TermVectorsResponse;
|
||||
|
@ -644,12 +644,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
|
|||
|
||||
}
|
||||
|
||||
public void optimize(OptimizeRequest optimize) throws IOException {
|
||||
public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
|
||||
verifyStarted();
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("optimize with {}", optimize);
|
||||
logger.trace("force merge with {}", forceMerge);
|
||||
}
|
||||
getEngine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false);
|
||||
getEngine().forceMerge(forceMerge.flush(), forceMerge.maxNumSegments(),
|
||||
forceMerge.onlyExpungeDeletes(), false, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -57,12 +57,12 @@ import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExi
|
|||
import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.optimize.RestOptimizeAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
|
||||
import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
|
||||
|
@ -200,7 +200,7 @@ public class RestActionModule extends AbstractModule {
|
|||
bind(RestRefreshAction.class).asEagerSingleton();
|
||||
bind(RestFlushAction.class).asEagerSingleton();
|
||||
bind(RestSyncedFlushAction.class).asEagerSingleton();
|
||||
bind(RestOptimizeAction.class).asEagerSingleton();
|
||||
bind(RestForceMergeAction.class).asEagerSingleton();
|
||||
bind(RestUpgradeAction.class).asEagerSingleton();
|
||||
bind(RestClearIndicesCacheAction.class).asEagerSingleton();
|
||||
|
||||
|
|
|
@ -17,10 +17,10 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.rest.action.admin.indices.optimize;
|
||||
package org.elasticsearch.rest.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -38,11 +38,18 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh
|
|||
/**
|
||||
*
|
||||
*/
|
||||
public class RestOptimizeAction extends BaseRestHandler {
|
||||
public class RestForceMergeAction extends BaseRestHandler {
|
||||
|
||||
@Inject
|
||||
public RestOptimizeAction(Settings settings, RestController controller, Client client) {
|
||||
public RestForceMergeAction(Settings settings, RestController controller, Client client) {
|
||||
super(settings, controller, client);
|
||||
controller.registerHandler(POST, "/_forcemerge", this);
|
||||
controller.registerHandler(POST, "/{index}/_forcemerge", this);
|
||||
|
||||
controller.registerHandler(GET, "/_forcemerge", this);
|
||||
controller.registerHandler(GET, "/{index}/_forcemerge", this);
|
||||
|
||||
// TODO: Remove for 3.0
|
||||
controller.registerHandler(POST, "/_optimize", this);
|
||||
controller.registerHandler(POST, "/{index}/_optimize", this);
|
||||
|
||||
|
@ -52,14 +59,14 @@ public class RestOptimizeAction extends BaseRestHandler {
|
|||
|
||||
@Override
|
||||
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
|
||||
OptimizeRequest optimizeRequest = new OptimizeRequest(Strings.splitStringByCommaToArray(request.param("index")));
|
||||
optimizeRequest.indicesOptions(IndicesOptions.fromRequest(request, optimizeRequest.indicesOptions()));
|
||||
optimizeRequest.maxNumSegments(request.paramAsInt("max_num_segments", optimizeRequest.maxNumSegments()));
|
||||
optimizeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", optimizeRequest.onlyExpungeDeletes()));
|
||||
optimizeRequest.flush(request.paramAsBoolean("flush", optimizeRequest.flush()));
|
||||
client.admin().indices().optimize(optimizeRequest, new RestBuilderListener<OptimizeResponse>(channel) {
|
||||
ForceMergeRequest mergeRequest = new ForceMergeRequest(Strings.splitStringByCommaToArray(request.param("index")));
|
||||
mergeRequest.indicesOptions(IndicesOptions.fromRequest(request, mergeRequest.indicesOptions()));
|
||||
mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments()));
|
||||
mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes()));
|
||||
mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush()));
|
||||
client.admin().indices().forceMerge(mergeRequest, new RestBuilderListener<ForceMergeResponse>(channel) {
|
||||
@Override
|
||||
public RestResponse buildResponse(OptimizeResponse response, XContentBuilder builder) throws Exception {
|
||||
public RestResponse buildResponse(ForceMergeResponse response, XContentBuilder builder) throws Exception {
|
||||
builder.startObject();
|
||||
buildBroadcastShardsHeader(builder, request, response);
|
||||
builder.endObject();
|
|
@ -62,7 +62,7 @@ public class RestThreadPoolAction extends AbstractCatAction {
|
|||
ThreadPool.Names.GET,
|
||||
ThreadPool.Names.INDEX,
|
||||
ThreadPool.Names.MANAGEMENT,
|
||||
ThreadPool.Names.OPTIMIZE,
|
||||
ThreadPool.Names.FORCE_MERGE,
|
||||
ThreadPool.Names.PERCOLATE,
|
||||
ThreadPool.Names.REFRESH,
|
||||
ThreadPool.Names.SEARCH,
|
||||
|
@ -78,7 +78,7 @@ public class RestThreadPoolAction extends AbstractCatAction {
|
|||
"g",
|
||||
"i",
|
||||
"ma",
|
||||
"o",
|
||||
"fm",
|
||||
"p",
|
||||
"r",
|
||||
"s",
|
||||
|
|
|
@ -81,7 +81,7 @@ public class ThreadPool extends AbstractComponent {
|
|||
public static final String REFRESH = "refresh";
|
||||
public static final String WARMER = "warmer";
|
||||
public static final String SNAPSHOT = "snapshot";
|
||||
public static final String OPTIMIZE = "optimize";
|
||||
public static final String FORCE_MERGE = "force_merge";
|
||||
public static final String FETCH_SHARD_STARTED = "fetch_shard_started";
|
||||
public static final String FETCH_SHARD_STORE = "fetch_shard_store";
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ public class ThreadPool extends AbstractComponent {
|
|||
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build());
|
||||
defaultExecutorTypeSettings.put(Names.SNAPSHOT,
|
||||
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build());
|
||||
defaultExecutorTypeSettings.put(Names.OPTIMIZE, settingsBuilder().put("type", "fixed").put("size", 1).build());
|
||||
defaultExecutorTypeSettings.put(Names.FORCE_MERGE, settingsBuilder().put("type", "fixed").put("size", 1).build());
|
||||
defaultExecutorTypeSettings.put(Names.FETCH_SHARD_STARTED,
|
||||
settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", availableProcessors * 2).build());
|
||||
defaultExecutorTypeSettings.put(Names.FETCH_SHARD_STORE,
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.optimize;
|
||||
package org.elasticsearch.action.admin.indices.forcemerge;
|
||||
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
@ -31,10 +31,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa
|
|||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
@ClusterScope(scope = ESIntegTestCase.Scope.TEST)
|
||||
public class OptimizeBlocksIT extends ESIntegTestCase {
|
||||
public class ForceMergeBlocksIT extends ESIntegTestCase {
|
||||
|
||||
@Test
|
||||
public void testOptimizeWithBlocks() {
|
||||
public void testForceMergeWithBlocks() {
|
||||
createIndex("test");
|
||||
ensureGreen("test");
|
||||
|
||||
|
@ -49,7 +49,7 @@ public class OptimizeBlocksIT extends ESIntegTestCase {
|
|||
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
|
||||
try {
|
||||
enableIndexBlock("test", blockSetting);
|
||||
OptimizeResponse response = client().admin().indices().prepareOptimize("test").execute().actionGet();
|
||||
ForceMergeResponse response = client().admin().indices().prepareForceMerge("test").execute().actionGet();
|
||||
assertNoFailures(response);
|
||||
assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
} finally {
|
||||
|
@ -61,22 +61,22 @@ public class OptimizeBlocksIT extends ESIntegTestCase {
|
|||
for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
|
||||
try {
|
||||
enableIndexBlock("test", blockSetting);
|
||||
assertBlocked(client().admin().indices().prepareOptimize("test"));
|
||||
assertBlocked(client().admin().indices().prepareForceMerge("test"));
|
||||
} finally {
|
||||
disableIndexBlock("test", blockSetting);
|
||||
}
|
||||
}
|
||||
|
||||
// Optimizing all indices is blocked when the cluster is read-only
|
||||
// Merging all indices is blocked when the cluster is read-only
|
||||
try {
|
||||
OptimizeResponse response = client().admin().indices().prepareOptimize().execute().actionGet();
|
||||
ForceMergeResponse response = client().admin().indices().prepareForceMerge().execute().actionGet();
|
||||
assertNoFailures(response);
|
||||
assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
|
||||
setClusterReadOnly(true);
|
||||
assertBlocked(client().admin().indices().prepareOptimize());
|
||||
assertBlocked(client().admin().indices().prepareForceMerge());
|
||||
} finally {
|
||||
setClusterReadOnly(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -143,7 +143,7 @@ public class TimeDataHistogramAggregationBenchmark {
|
|||
System.out.println("Time range 1: " + (currentTimeInMillis1[0] - startTimeInMillis) / 1000.0 / 3600 + " hours");
|
||||
System.out.println("Time range 2: " + (currentTimeInMillis2[0] - startTimeInMillis) / 1000.0 / 3600 + " hours");
|
||||
System.out.println("--> optimizing index");
|
||||
client.admin().indices().prepareOptimize().setMaxNumSegments(1).get();
|
||||
client.admin().indices().prepareForceMerge().setMaxNumSegments(1).get();
|
||||
} catch (IndexAlreadyExistsException e) {
|
||||
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
|
||||
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
|
||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.document;
|
|||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
import org.elasticsearch.action.delete.DeleteResponse;
|
||||
|
@ -81,10 +81,10 @@ public class DocumentActionsIT extends ESIntegTestCase {
|
|||
assertNoFailures(clearIndicesCacheResponse);
|
||||
assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
|
||||
logger.info("Optimizing");
|
||||
logger.info("Force Merging");
|
||||
waitForRelocation(ClusterHealthStatus.GREEN);
|
||||
OptimizeResponse optimizeResponse = optimize();
|
||||
assertThat(optimizeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
ForceMergeResponse mergeResponse = forceMerge();
|
||||
assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
|
||||
GetResponse getResult;
|
||||
|
||||
|
|
|
@ -360,7 +360,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
|
|||
}
|
||||
logger.info("Running Cluster Health");
|
||||
ensureGreen();
|
||||
client().admin().indices().prepareOptimize("test").setMaxNumSegments(100).get(); // just wait for merges
|
||||
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get(); // just wait for merges
|
||||
client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get();
|
||||
|
||||
boolean useSyncIds = randomBoolean();
|
||||
|
|
|
@ -27,9 +27,9 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder
|
|||
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
|
||||
|
@ -78,7 +78,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1", "test2"),true);
|
||||
verify(segments("test1", "test2"), true);
|
||||
verify(stats("test1", "test2"), true);
|
||||
verify(optimize("test1", "test2"), true);
|
||||
verify(forceMerge("test1", "test2"), true);
|
||||
verify(refresh("test1", "test2"), true);
|
||||
verify(validateQuery("test1", "test2"), true);
|
||||
verify(aliasExists("test1", "test2"), true);
|
||||
|
@ -99,7 +99,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1", "test2").setIndicesOptions(options),true);
|
||||
verify(segments("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(stats("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(optimize("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(forceMerge("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(refresh("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(validateQuery("test1", "test2").setIndicesOptions(options), true);
|
||||
verify(aliasExists("test1", "test2").setIndicesOptions(options), true);
|
||||
|
@ -120,7 +120,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(segments("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(stats("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(optimize("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(forceMerge("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(refresh("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
|
||||
|
@ -143,7 +143,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1", "test2").setIndicesOptions(options),false);
|
||||
verify(segments("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(stats("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(optimize("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(forceMerge("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(refresh("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
|
||||
verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
|
||||
|
@ -175,7 +175,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1").setIndicesOptions(options),true);
|
||||
verify(segments("test1").setIndicesOptions(options), true);
|
||||
verify(stats("test1").setIndicesOptions(options), true);
|
||||
verify(optimize("test1").setIndicesOptions(options), true);
|
||||
verify(forceMerge("test1").setIndicesOptions(options), true);
|
||||
verify(refresh("test1").setIndicesOptions(options), true);
|
||||
verify(validateQuery("test1").setIndicesOptions(options), true);
|
||||
verify(aliasExists("test1").setIndicesOptions(options), true);
|
||||
|
@ -196,7 +196,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1").setIndicesOptions(options),false);
|
||||
verify(segments("test1").setIndicesOptions(options), false);
|
||||
verify(stats("test1").setIndicesOptions(options), false);
|
||||
verify(optimize("test1").setIndicesOptions(options), false);
|
||||
verify(forceMerge("test1").setIndicesOptions(options), false);
|
||||
verify(refresh("test1").setIndicesOptions(options), false);
|
||||
verify(validateQuery("test1").setIndicesOptions(options), false);
|
||||
verify(aliasExists("test1").setIndicesOptions(options), false);
|
||||
|
@ -220,7 +220,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1").setIndicesOptions(options),false);
|
||||
verify(segments("test1").setIndicesOptions(options), false);
|
||||
verify(stats("test1").setIndicesOptions(options), false);
|
||||
verify(optimize("test1").setIndicesOptions(options), false);
|
||||
verify(forceMerge("test1").setIndicesOptions(options), false);
|
||||
verify(refresh("test1").setIndicesOptions(options), false);
|
||||
verify(validateQuery("test1").setIndicesOptions(options), false);
|
||||
verify(aliasExists("test1").setIndicesOptions(options), false);
|
||||
|
@ -244,7 +244,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1").setIndicesOptions(options),true);
|
||||
verify(segments("test1").setIndicesOptions(options), true);
|
||||
verify(stats("test1").setIndicesOptions(options), true);
|
||||
verify(optimize("test1").setIndicesOptions(options), true);
|
||||
verify(forceMerge("test1").setIndicesOptions(options), true);
|
||||
verify(refresh("test1").setIndicesOptions(options), true);
|
||||
verify(validateQuery("test1").setIndicesOptions(options), true);
|
||||
verify(aliasExists("test1").setIndicesOptions(options), true);
|
||||
|
@ -264,7 +264,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1").setIndicesOptions(options),false);
|
||||
verify(segments("test1").setIndicesOptions(options), false);
|
||||
verify(stats("test1").setIndicesOptions(options), false);
|
||||
verify(optimize("test1").setIndicesOptions(options), false);
|
||||
verify(forceMerge("test1").setIndicesOptions(options), false);
|
||||
verify(refresh("test1").setIndicesOptions(options), false);
|
||||
verify(validateQuery("test1").setIndicesOptions(options), false);
|
||||
verify(aliasExists("test1").setIndicesOptions(options), false);
|
||||
|
@ -287,7 +287,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush("test1").setIndicesOptions(options),false);
|
||||
verify(segments("test1").setIndicesOptions(options), false);
|
||||
verify(stats("test1").setIndicesOptions(options), false);
|
||||
verify(optimize("test1").setIndicesOptions(options), false);
|
||||
verify(forceMerge("test1").setIndicesOptions(options), false);
|
||||
verify(refresh("test1").setIndicesOptions(options), false);
|
||||
verify(validateQuery("test1").setIndicesOptions(options), false);
|
||||
verify(aliasExists("test1").setIndicesOptions(options), false);
|
||||
|
@ -342,7 +342,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush(indices),false);
|
||||
verify(segments(indices), false);
|
||||
verify(stats(indices), false);
|
||||
verify(optimize(indices), false);
|
||||
verify(forceMerge(indices), false);
|
||||
verify(refresh(indices), false);
|
||||
verify(validateQuery(indices), true);
|
||||
verify(aliasExists(indices), false);
|
||||
|
@ -364,7 +364,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush(indices).setIndicesOptions(options),false);
|
||||
verify(segments(indices).setIndicesOptions(options), false);
|
||||
verify(stats(indices).setIndicesOptions(options), false);
|
||||
verify(optimize(indices).setIndicesOptions(options), false);
|
||||
verify(forceMerge(indices).setIndicesOptions(options), false);
|
||||
verify(refresh(indices).setIndicesOptions(options), false);
|
||||
verify(validateQuery(indices).setIndicesOptions(options), false);
|
||||
verify(aliasExists(indices).setIndicesOptions(options), false);
|
||||
|
@ -389,7 +389,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush(indices),false);
|
||||
verify(segments(indices), false);
|
||||
verify(stats(indices), false);
|
||||
verify(optimize(indices), false);
|
||||
verify(forceMerge(indices), false);
|
||||
verify(refresh(indices), false);
|
||||
verify(validateQuery(indices), false);
|
||||
verify(aliasExists(indices), false);
|
||||
|
@ -411,7 +411,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush(indices),false);
|
||||
verify(segments(indices), false);
|
||||
verify(stats(indices), false);
|
||||
verify(optimize(indices), false);
|
||||
verify(forceMerge(indices), false);
|
||||
verify(refresh(indices), false);
|
||||
verify(validateQuery(indices), true);
|
||||
verify(aliasExists(indices), false);
|
||||
|
@ -433,7 +433,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
verify(_flush(indices).setIndicesOptions(options),false);
|
||||
verify(segments(indices).setIndicesOptions(options), false);
|
||||
verify(stats(indices).setIndicesOptions(options), false);
|
||||
verify(optimize(indices).setIndicesOptions(options), false);
|
||||
verify(forceMerge(indices).setIndicesOptions(options), false);
|
||||
verify(refresh(indices).setIndicesOptions(options), false);
|
||||
verify(validateQuery(indices).setIndicesOptions(options), false);
|
||||
verify(aliasExists(indices).setIndicesOptions(options), false);
|
||||
|
@ -788,8 +788,8 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
|
|||
return client().admin().indices().prepareStats(indices);
|
||||
}
|
||||
|
||||
private static OptimizeRequestBuilder optimize(String... indices) {
|
||||
return client().admin().indices().prepareOptimize(indices);
|
||||
private static ForceMergeRequestBuilder forceMerge(String... indices) {
|
||||
return client().admin().indices().prepareForceMerge(indices);
|
||||
}
|
||||
|
||||
private static RefreshRequestBuilder refresh(String... indices) {
|
||||
|
|
|
@@ -226,7 +226,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {

// Optimize does a waitForMerges, which we must do to make sure all in-flight (throttled) merges finish:
logger.info("test: optimize");
client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).get();
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get();
logger.info("test: optimize done");

// Record current throttling so far

@@ -264,7 +264,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
// when ESIntegTestCase.after tries to remove indices created by the test:

// Wait for merges to finish
client().admin().indices().prepareOptimize("test").get();
client().admin().indices().prepareForceMerge("test").get();
flush();

logger.info("test: test done");
@@ -356,7 +356,7 @@ public class IndexStatsIT extends ESIntegTestCase {
// Optimize & flush and wait; else we sometimes get a "Delete Index failed - not acked"
// when ESIntegTestCase.after tries to remove indices created by the test:
logger.info("test: now optimize");
client().admin().indices().prepareOptimize("test").get();
client().admin().indices().prepareForceMerge("test").get();
flush();
logger.info("test: test done");
}

@@ -519,7 +519,7 @@ public class IndexStatsIT extends ESIntegTestCase {
client().prepareIndex("test1", "type2", Integer.toString(i)).setSource("field", "value").execute().actionGet();
client().admin().indices().prepareFlush().execute().actionGet();
}
client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
stats = client().admin().indices().prepareStats()
.setMerge(true)
.execute().actionGet();

@@ -546,7 +546,7 @@ public class IndexStatsIT extends ESIntegTestCase {
assertThat(stats.getTotal().getSegments().getVersionMapMemoryInBytes(), greaterThan(0l));

client().admin().indices().prepareFlush().get();
client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
stats = client().admin().indices().prepareStats().setSegments(true).get();

assertThat(stats.getTotal().getSegments(), notNullValue());
@@ -115,7 +115,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
ensureGreen();
// ensure we have flushed segments and make them a big one via optimize
client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).get();
client().admin().indices().prepareOptimize().setMaxNumSegments(1).setFlush(true).get();
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get();

final CountDownLatch latch = new CountDownLatch(1);
final AtomicBoolean truncate = new AtomicBoolean(true);

@@ -155,4 +155,4 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
}
}
}
}
@@ -1391,7 +1391,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).setFlush(true).get();
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(true).get();
client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
client().prepareIndex("test", "child", "c4").setParent("p3").setSource("c_field", "green").get();
@@ -22,7 +22,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;

@@ -949,7 +949,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
if (optimize) {
// make sure merging works just fine
client().admin().indices().prepareFlush(INDEX).execute().actionGet();
client().admin().indices().prepareOptimize(INDEX).setMaxNumSegments(randomIntBetween(1, 5)).get();
client().admin().indices().prepareForceMerge(INDEX).setMaxNumSegments(randomIntBetween(1, 5)).get();
}
}

@@ -967,7 +967,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
.field("somefield", "somevalue")
.endObject()
).get(); // we have 2 docs in a segment...
OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
ForceMergeResponse actionGet = client().admin().indices().prepareForceMerge().setFlush(true).setMaxNumSegments(1).execute().actionGet();
assertAllSuccessful(actionGet);
refresh();
// update the first one and then merge.. the target segment will have no value in FIELD

@@ -976,7 +976,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
.field("somefield", "somevalue")
.endObject()
).get();
actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
actionGet = client().admin().indices().prepareForceMerge().setFlush(true).setMaxNumSegments(1).execute().actionGet();
assertAllSuccessful(actionGet);
refresh();
@@ -1605,7 +1605,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
}
indexRandom(true, builders);
flushAndRefresh();
assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setMaxNumSegments(1).get());
assertNoFailures(client().admin().indices().prepareForceMerge("test").setFlush(true).setMaxNumSegments(1).get());

CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));

@@ -2092,4 +2092,4 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(ex.getMessage(), containsString("Invalid snapshot name"));
}
}
}
}
@@ -185,7 +185,7 @@ public class SnapshotBackwardsCompatibilityIT extends ESBackcompatTestCase {
}
indexRandom(true, builders);
flushAndRefresh();
assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setMaxNumSegments(1).get());
assertNoFailures(client().admin().indices().prepareForceMerge("test").setFlush(true).setMaxNumSegments(1).get());

CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));
@@ -43,7 +43,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;

@@ -1250,11 +1250,11 @@ public abstract class ESIntegTestCase extends ESTestCase {
}

/**
* Waits for all relocations and optimized all indices in the cluster to 1 segment.
* Waits for all relocations and force merge all indices in the cluster to 1 segment.
*/
protected OptimizeResponse optimize() {
protected ForceMergeResponse forceMerge() {
waitForRelocation();
OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
ForceMergeResponse actionGet = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
assertNoFailures(actionGet);
return actionGet;
}

@@ -1472,7 +1472,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
}

/**
* Maybe refresh, optimize, or flush then always make sure there aren't too many in flight async operations.
* Maybe refresh, force merge, or flush then always make sure there aren't too many in flight async operations.
*/
private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlightAsyncOperations, boolean maybeFlush) throws InterruptedException {
if (rarely()) {

@@ -1488,8 +1488,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
}
} else if (rarely()) {
client().admin().indices().prepareOptimize(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
new LatchedActionListener<ForceMergeResponse>(newLatch(inFlightAsyncOperations)));
}
}
while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) {
@@ -409,7 +409,7 @@ public final class InternalTestCluster extends TestCluster {
if (random.nextBoolean()) {
// change threadpool types to make sure we don't have components that rely on the type of thread pools
for (String name : Arrays.asList(ThreadPool.Names.BULK, ThreadPool.Names.FLUSH, ThreadPool.Names.GET,
ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.OPTIMIZE,
ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.FORCE_MERGE,
ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT,
ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
if (random.nextBoolean()) {
@@ -73,7 +73,7 @@ public class ThreadPoolStatsTests extends ESTestCase {
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L));
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L));
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L));
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.OPTIMIZE, -1, 0, 0, 0, 0, 0L));
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L));
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.PERCOLATE, -1, 0, 0, 0, 0, 0L));
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L));

@@ -105,8 +105,8 @@ public class ThreadPoolStatsTests extends ESTestCase {
parser.skipChildren();
token = parser.nextToken();
}
assertThat(names, contains(ThreadPool.Names.GENERIC,
ThreadPool.Names.OPTIMIZE,
assertThat(names, contains(ThreadPool.Names.FORCE_MERGE,
ThreadPool.Names.GENERIC,
ThreadPool.Names.PERCOLATE,
ThreadPool.Names.SAME,
ThreadPool.Names.SEARCH,
@@ -161,13 +161,13 @@ public class TribeIT extends ESIntegTestCase {
// all is well!
}
try {
tribeClient.admin().indices().prepareOptimize("test1").execute().actionGet();
tribeClient.admin().indices().prepareForceMerge("test1").execute().actionGet();
fail("cluster block should be thrown");
} catch (ClusterBlockException e) {
// all is well!
}
try {
tribeClient.admin().indices().prepareOptimize("test2").execute().actionGet();
tribeClient.admin().indices().prepareForceMerge("test2").execute().actionGet();
fail("cluster block should be thrown");
} catch (ClusterBlockException e) {
// all is well!
@@ -170,7 +170,7 @@ public class SimpleTTLIT extends ESIntegTestCase {
if (rarely()) {
client().admin().indices().prepareFlush("test").get();
} else if (rarely()) {
client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).get();
client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get();
}
IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
// TTL deletes two docs, but it is indexed in the primary shard and replica shard.
@@ -57,7 +57,7 @@ Currently available <<modules-threadpool,thread pools>>:
|`get` |`g` |Thread pool used for <<docs-get,get>> operations
|`index` |`i` |Thread pool used for <<docs-index_,index>>/<<docs-delete,delete>> operations
|`management` |`ma` |Thread pool used for management of Elasticsearch (e.g. cluster management)
|`optimize` |`o` |Thread pool used for <<indices-optimize,optimize>> operations
|`force_merge` |`fm` |Thread pool used for <<indices-forcemerge,force merge>> operations
|`percolate` |`p` |Thread pool used for <<search-percolate,percolator>> operations
|`refresh` |`r` |Thread pool used for <<indices-refresh,refresh>> operations
|`search` |`s` |Thread pool used for <<search-search,search>>/<<search-count,count>> operations

@@ -107,4 +107,4 @@ other details like the `ip` of the responding node(s).
|`host` |`h` |The hostname for the current node
|`ip` |`i` |The IP address for the current node
|`port` |`po` |The bound transport port for the current node
|=======================================================================
|=======================================================================
@@ -59,7 +59,7 @@ and warmers.
* <<indices-clearcache>>
* <<indices-refresh>>
* <<indices-flush>>
* <<indices-optimize>>
* <<indices-forcemerge>>
* <<indices-upgrade>>

--

@@ -110,7 +110,7 @@ include::indices/flush.asciidoc[]

include::indices/refresh.asciidoc[]

include::indices/optimize.asciidoc[]
include::indices/forcemerge.asciidoc[]

include::indices/upgrade.asciidoc[]
@@ -0,0 +1,51 @@
[[indices-forcemerge]]
== Force Merge

The force merge API allows you to force merge one or more indices. The merge
relates to the number of segments a Lucene index holds within each shard. The
force merge operation reduces the number of segments by merging them.

This call will block until the merge is complete. If the HTTP connection is
lost, the request will continue in the background, and any new requests will
block until the previous force merge is complete.

[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/twitter/_forcemerge'
--------------------------------------------------

[float]
[[forcemerge-parameters]]
=== Request Parameters

The force merge API accepts the following request parameters:

[horizontal]
`max_num_segments`:: The number of segments to merge to. To fully
merge the index, set it to `1`. Defaults to simply checking if a
merge needs to execute, and if so, executes it.

`only_expunge_deletes`:: Should the merge process only expunge segments with
deletes in them. In Lucene, a document is not deleted from a segment, just marked
as deleted. During a merge process of segments, a new segment is created that
does not have those deletes. This flag allows merging only those segments that
contain deletes. Defaults to `false`. Note that this won't override the
`index.merge.policy.expunge_deletes_allowed` threshold.

`flush`:: Should a flush be performed after the forced merge. Defaults to
`true`.

[float]
[[forcemerge-multi-index]]
=== Multi Index

The force merge API can be applied to more than one index with a single call, or
even on `_all` the indices.

[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_forcemerge'

$ curl -XPOST 'http://localhost:9200/_forcemerge'
--------------------------------------------------
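For readers of this new page who use the Java client rather than the REST endpoint, the same operation goes through `prepareForceMerge`, exactly as the test changes elsewhere in this commit do. The sketch below is illustrative only: the `ForceMergeExample` class name and the single-segment/flush settings are assumptions, not part of the commit.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.client.Client;

public class ForceMergeExample {

    /**
     * Merges every shard of the given index down to a single segment and
     * flushes afterwards, mirroring POST /{index}/_forcemerge?max_num_segments=1.
     */
    public static ForceMergeResponse forceMergeToOneSegment(Client client, String index) {
        return client.admin().indices()
                .prepareForceMerge(index)
                .setMaxNumSegments(1) // fully merge each shard into one segment
                .setFlush(true)       // flush when the merge is done (the API default)
                .get();               // blocks until the merge completes
    }
}
--------------------------------------------------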
@@ -1,6 +1,8 @@
[[indices-optimize]]
== Optimize

deprecated[2.1.0,Optimize API has been renamed to the force merge API]

The optimize API allows to optimize one or more indices through an API.
The optimize process basically optimizes the index for faster search
operations (and relates to the number of segments a Lucene index holds
@@ -63,11 +63,11 @@ curl -XPUT localhost:9200/test/_settings -d '{
} }'
--------------------------------------------------

And, an optimize should be called:
And, a force merge should be called:

[source,js]
--------------------------------------------------
curl -XPOST 'http://localhost:9200/test/_optimize?max_num_segments=5'
curl -XPOST 'http://localhost:9200/test/_forcemerge?max_num_segments=5'
--------------------------------------------------

[float]
@@ -70,7 +70,7 @@ $ curl -XPOST 'http://localhost:9200/twitter/_upgrade'

NOTE: Upgrading is an I/O intensive operation, and is limited to processing a
single shard per node at a time. It also is not allowed to run at the same
time as optimize.
time as an optimize/force-merge.

This call will block until the upgrade is complete. If the http connection
is lost, the request will continue in the background, and

@@ -131,4 +131,4 @@ curl 'http://localhost:9200/twitter/_upgrade?pretty&human'
The level of details in the upgrade status command can be controlled by
setting `level` parameter to `cluster`, `index` (default) or `shard` levels.
For example, you can run the upgrade status command with `level=shard` to
get detailed upgrade information of each individual shard.
get detailed upgrade information of each individual shard.
@@ -65,4 +65,10 @@ MoreLikeThisBuilder#addLikeItem.

If sorting on field inside a nested object then the `nested_path` should be specified.
Before there was an attempt to resolve the nested path automatically, but that was sometimes incorrect.
To avoid confusion the `nested_path` should always be specified.
To avoid confusion the `nested_path` should always be specified.

=== Deprecations

==== Optimize API

The Optimize API has been deprecated; all new optimize actions should use the new Force Merge API.
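For Java API users the rename is mechanical: the request builder, request, and response classes change name but keep the same flags, as the test migrations in this commit show. A hedged before/after sketch (the `test` index and the single-segment setting are illustrative):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.client.Client;

public class OptimizeMigrationSketch {

    public static ForceMergeResponse merge(Client client) {
        // Before (deprecated Optimize API):
        //   client.admin().indices().prepareOptimize("test").setMaxNumSegments(1).get();
        // After: the Force Merge API has the same flags, just a different name.
        return client.admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get();
    }
}
--------------------------------------------------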
@@ -81,7 +81,7 @@ There are a couple of helper methods in `ESIntegTestCase`, which will make your
`createIndex(name)`:: Creates an index with the specified name
`flush()`:: Flushes all indices in a cluster
`flushAndRefresh()`:: Combines `flush()` and `refresh()` calls
`optimize()`:: Waits for all relocations and optimized all indices in the cluster to one segment.
`forceMerge()`:: Waits for all relocations and force merges all indices in the cluster to one segment.
`indexExists(name)`:: Checks if given index exists
`admin()`:: Returns an `AdminClient` for administrative tasks
`clusterService()`:: Returns the cluster service java class
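A minimal sketch of how these helpers combine in an integration test; the class name, index name, and document are illustrative, while the helper calls are the ones listed above and exercised elsewhere in this commit:

[source,java]
--------------------------------------------------
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Test;

public class ForceMergeHelperIT extends ESIntegTestCase {

    @Test
    public void testHelpers() throws Exception {
        createIndex("test");                                                          // create the index
        client().prepareIndex("test", "type", "1").setSource("field", "value").get(); // index one document
        flushAndRefresh();                                                            // flush() + refresh()
        forceMerge();                                                                 // wait for relocations, merge to one segment
        assertTrue(indexExists("test"));                                              // the index is still there
    }
}
--------------------------------------------------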
@@ -31,6 +31,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;

@@ -39,8 +41,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.optimize.OptimizeAction;
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;

@@ -388,15 +388,15 @@ public class IndicesRequestTests extends ESIntegTestCase {
}

@Test
public void testOptimize() {
String optimizeShardAction = OptimizeAction.NAME + "[n]";
interceptTransportActions(optimizeShardAction);
public void testForceMerge() {
String mergeShardAction = ForceMergeAction.NAME + "[n]";
interceptTransportActions(mergeShardAction);

OptimizeRequest optimizeRequest = new OptimizeRequest(randomIndicesOrAliases());
internalCluster().clientNodeClient().admin().indices().optimize(optimizeRequest).actionGet();
ForceMergeRequest mergeRequest = new ForceMergeRequest(randomIndicesOrAliases());
internalCluster().clientNodeClient().admin().indices().forceMerge(mergeRequest).actionGet();

clearInterceptedActions();
assertSameIndices(optimizeRequest, optimizeShardAction);
assertSameIndices(mergeRequest, mergeShardAction);
}

@Test
@@ -331,8 +331,8 @@ public class SimpleSortTests extends ESIntegTestCase {
assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));

// optimize
optimize();
// force merge
forceMerge();
refresh();

client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
@@ -172,7 +172,7 @@ public class SearchQueryIT extends ESIntegTestCase {
client().prepareIndex("test", "type1", "3").setSource("field1", "value3").get();
ensureGreen();
waitForRelocation();
optimize();
forceMerge();
refresh();
assertHitCount(
client().prepareSearch()
@@ -0,0 +1,52 @@
{
  "indices.forcemerge": {
    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html",
    "methods": ["POST", "GET"],
    "url": {
      "path": "/_forcemerge",
      "paths": ["/_forcemerge", "/{index}/_forcemerge"],
      "parts": {
        "index": {
          "type" : "list",
          "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
        }
      },
      "params": {
        "flush": {
          "type" : "boolean",
          "description" : "Specify whether the index should be flushed after performing the operation (default: true)"
        },
        "ignore_unavailable": {
          "type" : "boolean",
          "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
        },
        "allow_no_indices": {
          "type" : "boolean",
          "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
        },
        "expand_wildcards": {
          "type" : "enum",
          "options" : ["open","closed","none","all"],
          "default" : "open",
          "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
        },
        "max_num_segments": {
          "type" : "number",
          "description" : "The number of segments the index should be merged into (default: dynamic)"
        },
        "only_expunge_deletes": {
          "type" : "boolean",
          "description" : "Specify whether the operation should only expunge deleted documents"
        },
        "operation_threading": {
          "description" : "TODO: ?"
        },
        "wait_for_merge": {
          "type" : "boolean",
          "description" : "Specify whether the request should block until the merge process is finished (default: true)"
        }
      }
    },
    "body": null
  }
}
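The spec above is what the REST clients and the YAML test runner consume; any HTTP client can exercise the endpoint directly. A small sketch using only the JDK (the `twitter` index and the parameter values are illustrative; per the docs the call blocks until the merge finishes):

[source,java]
--------------------------------------------------
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public class ForceMergeHttpExample {

    public static void main(String[] args) throws Exception {
        // POST /{index}/_forcemerge with query parameters from the spec above.
        URL url = new URL("http://localhost:9200/twitter/_forcemerge?max_num_segments=1&flush=true");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("POST");
        try (Scanner scanner = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())) {
            scanner.useDelimiter("\\A"); // read the whole response body
            System.out.println(scanner.hasNext() ? scanner.next() : "");
        } finally {
            connection.disconnect();
        }
    }
}
--------------------------------------------------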
@@ -29,13 +29,13 @@

- do:
    cat.thread_pool:
      h: id,ba,fa,gea,ga,ia,maa,ma,oa,pa
      h: id,ba,fa,gea,ga,ia,maa,ma,fma,pa
      v: true
      full_id: true

- match:
    $body: |
      /^ id \s+ ba \s+ fa \s+ gea \s+ ga \s+ ia \s+ maa \s+ oa \s+ pa \s+ \n
      /^ id \s+ ba \s+ fa \s+ gea \s+ ga \s+ ia \s+ maa \s+ fma \s+ pa \s+ \n
      (\S+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \n)+ $/

- do:

@@ -100,12 +100,12 @@

- do:
    cat.thread_pool:
      h: id,optimize.type,optimize.active,optimize.size,optimize.queue,optimize.queueSize,optimize.rejected,optimize.largest,optimize.completed,optimize.min,optimize.max,optimize.keepAlive
      h: id,force_merge.type,force_merge.active,force_merge.size,force_merge.queue,force_merge.queueSize,force_merge.rejected,force_merge.largest,force_merge.completed,force_merge.min,force_merge.max,force_merge.keepAlive
      v: true

- match:
    $body: |
      /^ id \s+ optimize.type \s+ optimize.active \s+ optimize.size \s+ optimize.queue \s+ optimize.queueSize \s+ optimize.rejected \s+ optimize.largest \s+ optimize.completed \s+ optimize.min \s+ optimize.max \s+ optimize.keepAlive \s+ \n
      /^ id \s+ force_merge.type \s+ force_merge.active \s+ force_merge.size \s+ force_merge.queue \s+ force_merge.queueSize \s+ force_merge.rejected \s+ force_merge.largest \s+ force_merge.completed \s+ force_merge.min \s+ force_merge.max \s+ force_merge.keepAlive \s+ \n
      (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \s+ \n)+ $/

- do:
@@ -0,0 +1,10 @@
---
"Force merge index tests":
  - do:
      indices.create:
        index: testing

  - do:
      indices.forcemerge:
        index: testing
        max_num_segments: 1