Fix missed places referencing optimize force flag from removal in commit 1ae8195.

This commit is contained in:
Ryan Ernst 2014-10-07 08:56:38 -07:00
parent c06c10bbb0
commit 032184bd5e
6 changed files with 3 additions and 47 deletions

View File

@@ -73,15 +73,6 @@ public class OptimizeRequestBuilder extends BroadcastOperationRequestBuilder<Opt
return this;
}
/**
 * Controls whether the merge is carried out even when the shard already
 * consists of a single segment that contains no deletions.
 * Defaults to <tt>false</tt>.
 *
 * @param force whether to force the merge
 * @return this builder, for request chaining
 */
public OptimizeRequestBuilder setForce(boolean force) {
    request.force(force);
    return this;
}
@Override
protected void doExecute(ActionListener<OptimizeResponse> listener) {
client.optimize(request, listener);

View File

@@ -59,7 +59,6 @@ public class RestOptimizeAction extends BaseRestHandler {
optimizeRequest.maxNumSegments(request.paramAsInt("max_num_segments", optimizeRequest.maxNumSegments()));
optimizeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", optimizeRequest.onlyExpungeDeletes()));
optimizeRequest.flush(request.paramAsBoolean("flush", optimizeRequest.flush()));
optimizeRequest.force(request.paramAsBoolean("force", optimizeRequest.force()));
client.admin().indices().optimize(optimizeRequest, new RestBuilderListener<OptimizeResponse>(channel) {
@Override
public RestResponse buildResponse(OptimizeResponse response, XContentBuilder builder) throws Exception {

View File

@@ -137,40 +137,6 @@ public class InternalEngineIntegrationTest extends ElasticsearchIntegrationTest
assertTotalCompoundSegments(2, 3, "test");
}
// NOTE(review): this is the test being removed by the commit. It exercised the
// "force" optimize flag: without force, re-optimizing an index that is already
// a single segment must be a no-op; with setForce(true) the merge runs anyway
// and rewrites the segment.
public void testForceOptimize() throws ExecutionException, InterruptedException {
// Randomize the compound-file format so both storage variants are covered.
boolean compound = randomBoolean();
// Single primary shard, no replicas, so segment counts are deterministic.
assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder()
.put("number_of_replicas", 0)
.put("number_of_shards", 1)
// this is important otherwise the MP will still trigger a merge even if there is only one segment
.put(InternalEngine.INDEX_COMPOUND_ON_FLUSH, compound)
.put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, compound)
));
final int numDocs = randomIntBetween(10, 100);
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
}
indexRandom(true, builders);
ensureGreen();
flushAndRefresh();
// First optimize: merge everything down to a single segment.
client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).setWaitForMerge(true).get();
IndexSegments firstSegments = client().admin().indices().prepareSegments("test").get().getIndices().get("test");
// Second optimize WITHOUT force: expected to be a no-op, since the index
// already consists of a single segment with no deletions.
client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).setWaitForMerge(true).get();
IndexSegments secondsSegments = client().admin().indices().prepareSegments("test").get().getIndices().get("test");
// The very same single segment must be present before and after.
assertThat(segments(firstSegments), Matchers.containsInAnyOrder(segments(secondsSegments).toArray()));
assertThat(segments(firstSegments).size(), Matchers.equalTo(1));
assertThat(segments(secondsSegments), Matchers.containsInAnyOrder(segments(firstSegments).toArray()));
assertThat(segments(secondsSegments).size(), Matchers.equalTo(1));
// Third optimize WITH force: the merge must run even on the clean single
// segment, producing a new (different) segment.
client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).setWaitForMerge(true).setForce(true).get();
IndexSegments thirdSegments = client().admin().indices().prepareSegments("test").get().getIndices().get("test");
assertThat(segments(firstSegments).size(), Matchers.equalTo(1));
assertThat(segments(thirdSegments).size(), Matchers.equalTo(1));
// Still one segment, but not the same one as before the forced merge.
assertThat(segments(firstSegments), Matchers.not(Matchers.containsInAnyOrder(segments(thirdSegments).toArray())));
assertThat(segments(thirdSegments), Matchers.not(Matchers.containsInAnyOrder(segments(firstSegments).toArray())));
}
private void assertTotalCompoundSegments(int i, int t, String index) {
IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().prepareSegments(index).get();
IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(index);

View File

@@ -1358,7 +1358,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
}
indexRandom(true, builders);
flushAndRefresh();
-assertNoFailures(client().admin().indices().prepareOptimize("test").setForce(true).setFlush(true).setWaitForMerge(true).setMaxNumSegments(1).get());
+assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setWaitForMerge(true).setMaxNumSegments(1).get());
CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));

View File

@@ -185,7 +185,7 @@ public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCo
}
indexRandom(true, builders);
flushAndRefresh();
-assertNoFailures(client().admin().indices().prepareOptimize("test").setForce(true).setFlush(true).setWaitForMerge(true).setMaxNumSegments(1).get());
+assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setWaitForMerge(true).setMaxNumSegments(1).get());
CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));

View File

@@ -1194,7 +1194,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
*/
protected OptimizeResponse optimize() {
waitForRelocation();
-OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setForce(randomBoolean()).execute().actionGet();
+OptimizeResponse actionGet = client().admin().indices().prepareOptimize().execute().actionGet();
assertNoFailures(actionGet);
return actionGet;
}