[Remove] Deprecated Synced Flush API (#1761)

Remove the deprecated synced flush API, which was superseded by the sequence
number and retention lease mechanisms and is no longer used as of 2.0.0.

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
Nick Knize 2022-01-26 22:18:48 -06:00 committed by GitHub
parent 0f3b72b7bb
commit 0791c88af1
55 changed files with 325 additions and 4335 deletions
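Callers of the removed client methods can switch to the plain flush API, as the deprecation Javadoc removed below already advises. A minimal migration sketch, assuming an already-configured `RestHighLevelClient`; the index name is illustrative:

```java
import java.io.IOException;

import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.client.RequestOptions;
import org.opensearch.client.RestHighLevelClient;

public class FlushMigration {
    // Before (removed in this commit):
    //   client.indices().flushSynced(new SyncedFlushRequest("index1"), RequestOptions.DEFAULT)
    // After: a plain flush; durability is tracked by sequence numbers and retention leases.
    static FlushResponse flushIndex(RestHighLevelClient client) throws IOException {
        FlushRequest request = new FlushRequest("index1"); // index name is illustrative
        return client.indices().flush(request, RequestOptions.DEFAULT);
    }
}
```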

IndicesClient.java

@@ -40,7 +40,6 @@ import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
-import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.action.admin.indices.open.OpenIndexRequest;

@@ -931,53 +930,6 @@ public final class IndicesClient {
        );
    }
/**
* Initiate a synced flush manually using the synced flush API.
*
* @param syncedFlushRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
* @deprecated synced flush is deprecated and will be removed in 8.0.
* Use {@link #flush(FlushRequest, RequestOptions)} instead.
*/
@Deprecated
public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(
syncedFlushRequest,
IndicesRequestConverters::flushSynced,
options,
SyncedFlushResponse::fromXContent,
emptySet()
);
}
/**
* Asynchronously initiate a synced flush manually using the synced flush API.
*
* @param syncedFlushRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
* @deprecated synced flush is deprecated and will be removed in 8.0.
* Use {@link #flushAsync(FlushRequest, RequestOptions, ActionListener)} instead.
*/
@Deprecated
public Cancellable flushSyncedAsync(
SyncedFlushRequest syncedFlushRequest,
RequestOptions options,
ActionListener<SyncedFlushResponse> listener
) {
return restHighLevelClient.performRequestAsyncAndParseEntity(
syncedFlushRequest,
IndicesRequestConverters::flushSynced,
options,
SyncedFlushResponse::fromXContent,
listener,
emptySet()
);
}
    /**
     * Retrieve the settings of one or more indices.
     *
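The asynchronous variant maps the same way: `flushSyncedAsync` callers move to the `flushAsync` overload named in the deprecation note above. A hedged sketch; the listener body and index name are illustrative:

```java
import org.opensearch.action.ActionListener;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.client.Cancellable;
import org.opensearch.client.RequestOptions;
import org.opensearch.client.RestHighLevelClient;

public class AsyncFlushMigration {
    static Cancellable flushIndexAsync(RestHighLevelClient client) {
        FlushRequest request = new FlushRequest("index1"); // index name is illustrative
        return client.indices().flushAsync(request, RequestOptions.DEFAULT, new ActionListener<FlushResponse>() {
            @Override
            public void onResponse(FlushResponse response) {
                // inspect response.getFailedShards() much as flushSynced callers did
            }

            @Override
            public void onFailure(Exception e) {
                // transport or parsing failure
            }
        });
    }
}
```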

IndicesRequestConverters.java

@@ -42,7 +42,6 @@ import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.flush.FlushRequest;
-import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.open.OpenIndexRequest;
import org.opensearch.action.admin.indices.refresh.RefreshRequest;

@@ -322,15 +321,6 @@ final class IndicesRequestConverters {
        return request;
    }
static Request flushSynced(SyncedFlushRequest syncedFlushRequest) {
String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices();
Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced"));
RequestConverters.Params parameters = new RequestConverters.Params();
parameters.withIndicesOptions(syncedFlushRequest.indicesOptions());
request.addParameters(parameters.asMap());
return request;
}
    static Request forceMerge(ForceMergeRequest forceMergeRequest) {
        String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
        Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge"));

SyncedFlushResponse.java (file deleted)

@@ -1,346 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.client;
import org.opensearch.common.ParseField;
import org.opensearch.common.ParsingException;
import org.opensearch.common.xcontent.ConstructingObjectParser;
import org.opensearch.common.xcontent.ToXContentFragment;
import org.opensearch.common.xcontent.ToXContentObject;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentLocation;
import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.common.xcontent.XContentParser.Token;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
public class SyncedFlushResponse implements ToXContentObject {
public static final String SHARDS_FIELD = "_shards";
private ShardCounts totalCounts;
private Map<String, IndexResult> indexResults;
SyncedFlushResponse(ShardCounts totalCounts, Map<String, IndexResult> indexResults) {
this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed);
this.indexResults = Collections.unmodifiableMap(indexResults);
}
/**
* @return The total number of shard copies that were processed across all indexes
*/
public int totalShards() {
return totalCounts.total;
}
/**
* @return The number of successful shard copies that were processed across all indexes
*/
public int successfulShards() {
return totalCounts.successful;
}
/**
* @return The number of failed shard copies that were processed across all indexes
*/
public int failedShards() {
return totalCounts.failed;
}
/**
* @return A map of results for each index where the keys of the map are the index names
* and the values are the results encapsulated in {@link IndexResult}.
*/
public Map<String, IndexResult> getIndexResults() {
return indexResults;
}
ShardCounts getShardCounts() {
return totalCounts;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startObject(SHARDS_FIELD);
totalCounts.toXContent(builder, params);
builder.endObject();
for (Map.Entry<String, IndexResult> entry : indexResults.entrySet()) {
String indexName = entry.getKey();
IndexResult indexResult = entry.getValue();
builder.startObject(indexName);
indexResult.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
return builder;
}
public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser);
ShardCounts totalCounts = null;
Map<String, IndexResult> indexResults = new HashMap<>();
XContentLocation startLoc = parser.getTokenLocation();
while (parser.nextToken().equals(Token.FIELD_NAME)) {
if (parser.currentName().equals(SHARDS_FIELD)) {
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser);
totalCounts = ShardCounts.fromXContent(parser);
} else {
String indexName = parser.currentName();
IndexResult indexResult = IndexResult.fromXContent(parser);
indexResults.put(indexName, indexResult);
}
}
if (totalCounts != null) {
return new SyncedFlushResponse(totalCounts, indexResults);
} else {
throw new ParsingException(startLoc, "Unable to reconstruct object. Total counts for shards couldn't be parsed.");
}
}
/**
* Encapsulates the number of total successful and failed shard copies
*/
public static final class ShardCounts implements ToXContentFragment {
public static final String TOTAL_FIELD = "total";
public static final String SUCCESSFUL_FIELD = "successful";
public static final String FAILED_FIELD = "failed";
private static final ConstructingObjectParser<ShardCounts, Void> PARSER = new ConstructingObjectParser<>(
"shardcounts",
a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
}
private int total;
private int successful;
private int failed;
ShardCounts(int total, int successful, int failed) {
this.total = total;
this.successful = successful;
this.failed = failed;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(TOTAL_FIELD, total);
builder.field(SUCCESSFUL_FIELD, successful);
builder.field(FAILED_FIELD, failed);
return builder;
}
public static ShardCounts fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
public boolean equals(ShardCounts other) {
if (other != null) {
return other.total == this.total && other.successful == this.successful && other.failed == this.failed;
} else {
return false;
}
}
}
/**
* Description for the flush/synced results for a particular index.
* This includes total, successful and failed copies along with failure description for each failed copy.
*/
public static final class IndexResult implements ToXContentFragment {
public static final String TOTAL_FIELD = "total";
public static final String SUCCESSFUL_FIELD = "successful";
public static final String FAILED_FIELD = "failed";
public static final String FAILURES_FIELD = "failures";
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<IndexResult, Void> PARSER = new ConstructingObjectParser<>(
"indexresult",
a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List<ShardFailure>) a[3])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD));
}
private ShardCounts counts;
private List<ShardFailure> failures;
IndexResult(int total, int successful, int failed, List<ShardFailure> failures) {
counts = new ShardCounts(total, successful, failed);
if (failures != null) {
this.failures = Collections.unmodifiableList(failures);
} else {
this.failures = Collections.unmodifiableList(new ArrayList<>());
}
}
/**
* @return The total number of shard copies that were processed for this index.
*/
public int totalShards() {
return counts.total;
}
/**
* @return The number of successful shard copies that were processed for this index.
*/
public int successfulShards() {
return counts.successful;
}
/**
* @return The number of failed shard copies that were processed for this index.
*/
public int failedShards() {
return counts.failed;
}
/**
* @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index.
*/
public List<ShardFailure> failures() {
return failures;
}
ShardCounts getShardCounts() {
return counts;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
counts.toXContent(builder, params);
if (failures.size() > 0) {
builder.startArray(FAILURES_FIELD);
for (ShardFailure failure : failures) {
failure.toXContent(builder, params);
}
builder.endArray();
}
return builder;
}
public static IndexResult fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}
/**
* Description of a failed shard copy for an index.
*/
public static final class ShardFailure implements ToXContentFragment {
public static String SHARD_ID_FIELD = "shard";
public static String FAILURE_REASON_FIELD = "reason";
public static String ROUTING_FIELD = "routing";
private int shardId;
private String failureReason;
private Map<String, Object> routing;
@SuppressWarnings("unchecked")
static final ConstructingObjectParser<ShardFailure, Void> PARSER = new ConstructingObjectParser<>(
"shardfailure",
a -> new ShardFailure((Integer) a[0], (String) a[1], (Map<String, Object>) a[2])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD));
PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD));
PARSER.declareObject(optionalConstructorArg(), (parser, c) -> parser.map(), new ParseField(ROUTING_FIELD));
}
ShardFailure(int shardId, String failureReason, Map<String, Object> routing) {
this.shardId = shardId;
this.failureReason = failureReason;
if (routing != null) {
this.routing = Collections.unmodifiableMap(routing);
} else {
this.routing = Collections.unmodifiableMap(new HashMap<>());
}
}
/**
* @return Id of the shard whose copy failed
*/
public int getShardId() {
return shardId;
}
/**
* @return Reason for failure of the shard copy
*/
public String getFailureReason() {
return failureReason;
}
/**
* @return Additional information about the failure.
*/
public Map<String, Object> getRouting() {
return routing;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(SHARD_ID_FIELD, shardId);
builder.field(FAILURE_REASON_FIELD, failureReason);
if (routing.size() > 0) {
builder.field(ROUTING_FIELD, routing);
}
builder.endObject();
return builder;
}
public static ShardFailure fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}
}
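For reference, the wire format this class parsed looked roughly like the following, reconstructed from the `toXContent`/`fromXContent` logic above; the index name, counts, and routing contents are illustrative:

```json
{
  "_shards": { "total": 4, "successful": 3, "failed": 1 },
  "index1": {
    "total": 4,
    "successful": 3,
    "failed": 1,
    "failures": [
      { "shard": 1, "reason": "copy failure", "routing": { "state": "STARTED" } }
    ]
  }
}
```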

IndicesClientIT.java

@@ -46,7 +46,6 @@ import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
-import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.action.admin.indices.open.OpenIndexRequest;

@@ -126,7 +125,6 @@ import org.opensearch.index.IndexSettings;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
-import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.rest.RestStatus;
import org.opensearch.rest.action.admin.indices.RestCreateIndexAction;
import org.opensearch.rest.action.admin.indices.RestGetFieldMappingAction;

@@ -1080,39 +1078,6 @@ public class IndicesClientIT extends OpenSearchRestHighLevelClientTestCase {
        }
    }
public void testSyncedFlush() throws IOException {
{
String index = "index";
Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
createIndex(index, settings);
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index);
SyncedFlushResponse flushResponse = execute(
syncedFlushRequest,
highLevelClient().indices()::flushSynced,
highLevelClient().indices()::flushSyncedAsync,
expectWarningsOnce(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE)
);
assertThat(flushResponse.totalShards(), equalTo(1));
assertThat(flushResponse.successfulShards(), equalTo(1));
assertThat(flushResponse.failedShards(), equalTo(0));
}
{
String nonExistentIndex = "non_existent_index";
assertFalse(indexExists(nonExistentIndex));
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex);
OpenSearchException exception = expectThrows(
OpenSearchException.class,
() -> execute(
syncedFlushRequest,
highLevelClient().indices()::flushSynced,
highLevelClient().indices()::flushSyncedAsync,
expectWarningsOnce(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE)
)
);
assertEquals(RestStatus.NOT_FOUND, exception.status());
}
}
    public void testClearCache() throws IOException {
        {
            String index = "index";

IndicesRequestConvertersTests.java

@@ -45,7 +45,6 @@ import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.flush.FlushRequest;
-import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.open.OpenIndexRequest;
import org.opensearch.action.admin.indices.refresh.RefreshRequest;

@@ -750,33 +749,6 @@ public class IndicesRequestConvertersTests extends OpenSearchTestCase {
        Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
    }
public void testSyncedFlush() {
String[] indices = OpenSearchTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
SyncedFlushRequest syncedFlushRequest;
if (OpenSearchTestCase.randomBoolean()) {
syncedFlushRequest = new SyncedFlushRequest(indices);
} else {
syncedFlushRequest = new SyncedFlushRequest();
syncedFlushRequest.indices(indices);
}
Map<String, String> expectedParams = new HashMap<>();
RequestConvertersTests.setRandomIndicesOptions(
syncedFlushRequest::indicesOptions,
syncedFlushRequest::indicesOptions,
expectedParams
);
Request request = IndicesRequestConverters.flushSynced(syncedFlushRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
if (indices != null && indices.length > 0) {
endpoint.add(String.join(",", indices));
}
endpoint.add("_flush/synced");
Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
Assert.assertThat(request.getParameters(), equalTo(expectedParams));
Assert.assertThat(request.getEntity(), nullValue());
Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}
    public void testForceMerge() {
        String[] indices = OpenSearchTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
        ForceMergeRequest forceMergeRequest;

SyncedFlushResponseTests.java (file deleted)

@@ -1,258 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.client;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntMap;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.TestShardRouting;
import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.xcontent.DeprecationHandler;
import org.opensearch.common.xcontent.ToXContent;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.index.shard.ShardId;
import org.opensearch.indices.flush.ShardsSyncedFlushResult;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.test.OpenSearchTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class SyncedFlushResponseTests extends OpenSearchTestCase {
public void testXContentSerialization() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
TestPlan plan = createTestPlan();
XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent());
assertNotNull(plan.result);
serverResponsebuilder.startObject();
plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS);
serverResponsebuilder.endObject();
XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent());
assertNotNull(plan.result);
plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS);
Map<String, Object> serverContentMap = convertFailureListToSet(
serverResponsebuilder.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(serverResponsebuilder).streamInput()
)
.map()
);
Map<String, Object> clientContentMap = convertFailureListToSet(
clientResponsebuilder.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(clientResponsebuilder).streamInput()
)
.map()
);
assertEquals(serverContentMap, clientContentMap);
}
public void testXContentDeserialization() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
TestPlan plan = createTestPlan();
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
builder.startObject();
plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
XContentParser parser = builder.generator()
.contentType()
.xContent()
.createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, BytesReference.bytes(builder).streamInput());
SyncedFlushResponse originalResponse = plan.clientResult;
SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser);
assertNotNull(parsedResponse);
assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts());
for (Map.Entry<String, SyncedFlushResponse.IndexResult> entry : originalResponse.getIndexResults().entrySet()) {
String index = entry.getKey();
SyncedFlushResponse.IndexResult responseResult = entry.getValue();
SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index);
assertNotNull(responseResult);
assertNotNull(parsedResult);
assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts());
assertEquals(responseResult.failures().size(), parsedResult.failures().size());
for (SyncedFlushResponse.ShardFailure responseShardFailure : responseResult.failures()) {
assertTrue(containsFailure(parsedResult.failures(), responseShardFailure));
}
}
}
static class TestPlan {
SyncedFlushResponse.ShardCounts totalCounts;
Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
org.opensearch.action.admin.indices.flush.SyncedFlushResponse result;
SyncedFlushResponse clientResult;
}
TestPlan createTestPlan() throws IOException {
final TestPlan testPlan = new TestPlan();
final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
Map<String, SyncedFlushResponse.IndexResult> indexResults = new HashMap<>();
final XContentType xContentType = randomFrom(XContentType.values());
final int indexCount = randomIntBetween(1, 10);
int totalShards = 0;
int totalSuccessful = 0;
int totalFailed = 0;
for (int i = 0; i < indexCount; i++) {
final String index = "index_" + i;
int shards = randomIntBetween(1, 4);
int replicas = randomIntBetween(0, 2);
int successful = 0;
int failed = 0;
int failures = 0;
List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
List<SyncedFlushResponse.ShardFailure> shardFailures = new ArrayList<>();
for (int shard = 0; shard < shards; shard++) {
final ShardId shardId = new ShardId(index, "_na_", shard);
if (randomInt(5) < 2) {
// total shard failure
failed += replicas + 1;
failures++;
shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
shardFailures.add(new SyncedFlushResponse.ShardFailure(shardId.id(), "simulated total failure", new HashMap<>()));
} else {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
for (int copy = 0; copy < replicas + 1; copy++) {
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
index,
shard,
"node_" + shardId + "_" + copy,
null,
copy == 0,
ShardRoutingState.STARTED
);
if (randomInt(5) < 2) {
// shard copy failure
failed++;
failures++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
// Building the shardRouting map here.
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
Map<String, Object> routing = shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS)
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput()
)
.map();
shardFailures.add(new SyncedFlushResponse.ShardFailure(shardId.id(), "copy failure " + shardId, routing));
} else {
successful++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null));
}
}
shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
}
}
indicesResults.put(index, shardsResults);
indexResults.put(index, new SyncedFlushResponse.IndexResult(shards * (replicas + 1), successful, failed, shardFailures));
testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
testPlan.expectedFailuresPerIndex.put(index, failures);
totalFailed += failed;
totalShards += shards * (replicas + 1);
totalSuccessful += successful;
}
testPlan.result = new org.opensearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults);
testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed);
testPlan.clientResult = new SyncedFlushResponse(
new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed),
indexResults
);
return testPlan;
}
public boolean containsFailure(List<SyncedFlushResponse.ShardFailure> failures, SyncedFlushResponse.ShardFailure origFailure) {
for (SyncedFlushResponse.ShardFailure failure : failures) {
if (failure.getShardId() == origFailure.getShardId()
&& failure.getFailureReason().equals(origFailure.getFailureReason())
&& failure.getRouting().equals(origFailure.getRouting())) {
return true;
}
}
return false;
}
public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) {
if (first == null) {
assertNull(second);
} else {
assertTrue(first.equals(second));
}
}
public Map<String, Object> convertFailureListToSet(Map<String, Object> input) {
Map<String, Object> retMap = new HashMap<>();
for (Map.Entry<String, Object> entry : input.entrySet()) {
if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) {
retMap.put(entry.getKey(), entry.getValue());
} else {
// This was an index entry.
@SuppressWarnings("unchecked")
Map<String, Object> indexResult = (Map<String, Object>) entry.getValue();
Map<String, Object> retResult = new HashMap<>();
for (Map.Entry<String, Object> entry2 : indexResult.entrySet()) {
if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) {
@SuppressWarnings("unchecked")
List<Object> failures = (List<Object>) entry2.getValue();
Set<Object> retSet = new HashSet<>(failures);
retResult.put(entry.getKey(), retSet);
} else {
retResult.put(entry2.getKey(), entry2.getValue());
}
}
retMap.put(entry.getKey(), retResult);
}
}
return retMap;
}
}

View File

@@ -44,7 +44,6 @@ import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
-import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.action.admin.indices.open.OpenIndexRequest;

@@ -69,7 +68,6 @@ import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
import org.opensearch.client.GetAliasesResponse;
import org.opensearch.client.RequestOptions;
import org.opensearch.client.RestHighLevelClient;
-import org.opensearch.client.SyncedFlushResponse;
import org.opensearch.client.indices.AnalyzeRequest;
import org.opensearch.client.indices.AnalyzeResponse;
import org.opensearch.client.indices.CloseIndexRequest;

@@ -1012,94 +1010,6 @@ public class IndicesClientDocumentationIT extends OpenSearchRestHighLevelClientTestCase {
        }
    }
@SuppressWarnings("unused")
public void testSyncedFlushIndex() throws Exception {
RestHighLevelClient client = highLevelClient();
{
createIndex("index1", Settings.EMPTY);
}
{
// tag::flush-synced-request
SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1>
SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2>
SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3>
// end::flush-synced-request
// tag::flush-synced-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::flush-synced-request-indicesOptions
// tag::flush-synced-execute
SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, expectWarnings(
"Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."
));
// end::flush-synced-execute
// tag::flush-synced-response
int totalShards = flushSyncedResponse.totalShards(); // <1>
int successfulShards = flushSyncedResponse.successfulShards(); // <2>
int failedShards = flushSyncedResponse.failedShards(); // <3>
for (Map.Entry<String, SyncedFlushResponse.IndexResult> responsePerIndexEntry:
flushSyncedResponse.getIndexResults().entrySet()) {
String indexName = responsePerIndexEntry.getKey(); // <4>
SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue();
int totalShardsForIndex = indexResult.totalShards(); // <5>
int successfulShardsForIndex = indexResult.successfulShards(); // <6>
int failedShardsForIndex = indexResult.failedShards(); // <7>
if (failedShardsForIndex > 0) {
for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) {
int shardId = failureEntry.getShardId(); // <8>
String failureReason = failureEntry.getFailureReason(); // <9>
Map<String, Object> routing = failureEntry.getRouting(); // <10>
}
}
}
// end::flush-synced-response
// tag::flush-synced-execute-listener
ActionListener<SyncedFlushResponse> listener = new ActionListener<SyncedFlushResponse>() {
@Override
public void onResponse(SyncedFlushResponse refreshResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::flush-synced-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::flush-synced-execute-async
client.indices().flushSyncedAsync(request, expectWarnings(
"Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."
), listener); // <1>
// end::flush-synced-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
{
// tag::flush-synced-notfound
try {
SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist");
client.indices().flushSynced(request, RequestOptions.DEFAULT);
} catch (OpenSearchException exception) {
if (exception.status() == RestStatus.NOT_FOUND) {
// <1>
}
}
// end::flush-synced-notfound
}
}
    public void testGetSettings() throws Exception {
        RestHighLevelClient client = highLevelClient();

FullClusterRestartIT.java

@@ -737,7 +737,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
        assertOK(client().performRequest(flushRequest));
        if (randomBoolean()) {
-           performSyncedFlush(index, randomBoolean());
+           syncedFlush(index, randomBoolean());
        }
        if (shouldHaveTranslog) {
            // Update a few documents so we are sure to have a translog

@@ -1429,7 +1429,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
        if (randomBoolean()) {
            flush(index, randomBoolean());
        } else if (randomBoolean()) {
-           performSyncedFlush(index, randomBoolean());
+           syncedFlush(index, randomBoolean());
        }
        saveInfoDocument("doc_count", Integer.toString(numDocs));
    }

IndexingIT.java

@@ -35,13 +35,17 @@ import org.apache.http.HttpHost;
import org.opensearch.LegacyESVersion;
import org.opensearch.Version;
import org.opensearch.client.Request;
+import org.opensearch.client.RequestOptions;
import org.opensearch.client.Response;
+import org.opensearch.client.ResponseException;
import org.opensearch.client.RestClient;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.Strings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.common.xcontent.support.XContentMapValues;
import org.opensearch.index.seqno.SeqNoStats;
+import org.opensearch.rest.RestStatus;
import org.opensearch.rest.action.document.RestGetAction;
import org.opensearch.rest.action.document.RestIndexAction;
import org.opensearch.test.rest.OpenSearchRestTestCase;

@@ -49,11 +53,13 @@ import org.opensearch.test.rest.yaml.ObjectPath;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

+import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
public class IndexingIT extends OpenSearchRestTestCase { public class IndexingIT extends OpenSearchRestTestCase {
@@ -295,6 +301,59 @@ public class IndexingIT extends OpenSearchRestTestCase {
        request.setJsonEntity("{\"indices\": \"" + index + "\"}");
    }
public void testSyncedFlushTransition() throws Exception {
Nodes nodes = buildNodeAndVersions();
assumeTrue("bwc version is on 1.x or Legacy 7.x", nodes.getBWCVersion().before(Version.V_2_0_0));
assumeFalse("no new node found", nodes.getNewNodes().isEmpty());
assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty());
// Allocate shards to new nodes then verify synced flush requests processed by old nodes/new nodes
String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(","));
int numShards = randomIntBetween(1, 10);
int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1);
int totalShards = numShards * (numOfReplicas + 1);
final String index = "test_synced_flush";
createIndex(index, Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
.put("index.routing.allocation.include._name", newNodes).build());
ensureGreen(index);
indexDocs(index, randomIntBetween(0, 100), between(1, 100));
try (RestClient oldNodeClient = buildClient(restClientSettings(),
nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) {
Request request = new Request("POST", index + "/_flush/synced");
assertBusy(() -> {
ResponseException responseException = expectThrows(ResponseException.class, () -> oldNodeClient.performRequest(request));
assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus()));
assertThat(responseException.getResponse().getWarnings(),
contains("Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."));
Map<String, Object> result = ObjectPath.createFromResponse(responseException.getResponse()).evaluate("_shards");
assertThat(result.get("total"), equalTo(totalShards));
assertThat(result.get("successful"), equalTo(0));
assertThat(result.get("failed"), equalTo(totalShards));
});
Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0));
}
indexDocs(index, randomIntBetween(0, 100), between(1, 100));
try (RestClient newNodeClient = buildClient(restClientSettings(),
nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) {
Request request = new Request("POST", index + "/_flush/synced");
List<String> warningMsg = Arrays.asList("Synced flush was removed and a normal flush was performed instead. " +
"This transition will be removed in a future version.");
RequestOptions.Builder requestOptionsBuilder = RequestOptions.DEFAULT.toBuilder();
requestOptionsBuilder.setWarningsHandler(warnings -> warnings.equals(warningMsg) == false);
request.setOptions(requestOptionsBuilder);
assertBusy(() -> {
Map<String, Object> result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards");
assertThat(result.get("total"), equalTo(totalShards));
assertThat(result.get("successful"), equalTo(totalShards));
assertThat(result.get("failed"), equalTo(0));
});
Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0));
}
}
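The added test above pins down the mixed-cluster transition: the same `POST <index>/_flush/synced` gets a 409 plus the deprecation warning from a pre-2.0 node, while a 2.0+ node performs a normal flush and answers with the transition warning. Roughly, with illustrative shard counts:

```
POST /test_synced_flush/_flush/synced

# pre-2.0 node: HTTP 409 Conflict, with warning header
# "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."
{"_shards":{"total":2,"successful":0,"failed":2}}

# 2.0+ node: HTTP 200 OK, with warning header
# "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version."
{"_shards":{"total":2,"successful":2,"failed":0}}
```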
    private void assertCount(final String index, final String preference, final int expectedCount) throws IOException {
        Request request = new Request("GET", index + "/_count");
        request.addParameter("preference", preference);

RecoveryIT.java

@@ -322,13 +322,13 @@ public class RecoveryIT extends AbstractRollingTestCase {
                throw new IllegalStateException("unknown type " + CLUSTER_TYPE);
        }
        if (randomBoolean()) {
-           performSyncedFlush(index, randomBoolean());
+           syncedFlush(index, randomBoolean());
            ensureGlobalCheckpointSynced(index);
        }
    }

    public void testRecovery() throws Exception {
-       final String index = "recover_with_soft_deletes";
+       final String index = "test_recovery";
        if (CLUSTER_TYPE == ClusterType.OLD) {
            Settings.Builder settings = Settings.builder()
                .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)

@@ -360,6 +360,9 @@
                }
            }
        }
+       if (randomBoolean()) {
+           syncedFlush(index, randomBoolean());
+       }
        ensureGreen(index);
    }

@@ -671,7 +674,7 @@
            assertThat(XContentMapValues.extractValue("_source.updated_field", doc), equalTo(updates.get(docId)));
        }
        if (randomBoolean()) {
-           performSyncedFlush(index, randomBoolean());
+           syncedFlush(index, randomBoolean());
            ensureGlobalCheckpointSynced(index);
        }
    }

TranslogPolicyIT.java

@@ -141,7 +141,7 @@ public class TranslogPolicyIT extends AbstractFullClusterRestartTestCase {
            if (randomBoolean()) {
                flush(index, randomBoolean());
            } else if (randomBoolean()) {
-               performSyncedFlush(index, randomBoolean());
+               syncedFlush(index, randomBoolean());
            }
        }
        ensureGreen(index);

indices.flush_synced.json (file deleted)

@@ -1,62 +0,0 @@
{
"indices.flush_synced":{
"documentation":{
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html",
"description":"Performs a synced flush operation on one or more indices. Synced flush is deprecated and will be removed in 8.0. Use flush instead"
},
"stability":"stable",
"url":{
"paths":[
{
"path":"/_flush/synced",
"methods":[
"POST",
"GET"
],
"deprecated":{
"version":"7.6.0",
"description":"Synced flush is deprecated and will be removed in 8.0. Use flush instead."
}
},
{
"path":"/{index}/_flush/synced",
"methods":[
"POST",
"GET"
],
"parts":{
"index":{
"type":"list",
"description":"A comma-separated list of index names; use `_all` or empty string for all indices"
}
},
"deprecated":{
"version":"7.6.0",
"description":"Synced flush is deprecated and will be removed in 8.0. Use flush instead."
}
}
]
},
"params":{
"ignore_unavailable":{
"type":"boolean",
"description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)"
},
"allow_no_indices":{
"type":"boolean",
"description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
},
"expand_wildcards":{
"type":"enum",
"options":[
"open",
"closed",
"none",
"all"
],
"default":"open",
"description":"Whether to expand wildcard expression to concrete indices that are open, closed or both."
}
}
}
}
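With this spec removed, REST callers target the plain flush endpoint, which accepts the same indices options listed above (`ignore_unavailable`, `allow_no_indices`, `expand_wildcards`). Illustrative requests; the index name is hypothetical:

```
POST /_flush
POST /my-index/_flush?wait_if_ongoing=true&force=false
```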

indices.flush REST test (YAML)

@@ -1,33 +1,3 @@
---
"Index synced flush rest test":
- skip:
version: " - 7.5.99"
reason: "synced flush is deprecated in 7.6"
features: "allowed_warnings"
- do:
indices.create:
index: testing
body:
settings:
index:
number_of_replicas: 0
- do:
cluster.health:
wait_for_status: green
- do:
allowed_warnings:
- Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.
indices.flush_synced:
index: testing
- is_false: _shards.failed
- do:
indices.stats: {level: shards}
- is_true: indices.testing.shards.0.0.commit.user_data.sync_id
---
"Flush stats":
  - skip:

ShrinkIndexIT.java

@@ -667,7 +667,7 @@ public class ShrinkIndexIT extends OpenSearchIntegTestCase {
            IndexService indexShards = service.indexService(target.getIndex());
            IndexShard shard = indexShards.getShard(0);
            assertTrue(shard.isActive());
-           shard.checkIdle(0);
+           shard.flushOnIdle(0);
            assertFalse(shard.isActive());
        }
    }

ReplicaShardAllocatorIT.java

@@ -33,7 +33,6 @@
package org.opensearch.gateway;

import org.opensearch.LegacyESVersion;
-import org.opensearch.action.admin.indices.flush.SyncedFlushResponse;
import org.opensearch.action.admin.indices.stats.ShardStats;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.node.DiscoveryNode;

@@ -196,10 +195,6 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
                .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("f", "v"))
                .collect(Collectors.toList())
        );
assertBusy(() -> {
SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get();
assertThat(syncedFlushResponse.successfulShards(), equalTo(2));
});
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica));
        if (randomBoolean()) {
            indexRandom(

@@ -548,12 +543,6 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
        if (randomBoolean()) {
            client().admin().indices().prepareFlush(indexName).get();
        }
if (randomBoolean()) {
assertBusy(() -> {
SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get();
assertThat(syncedFlushResponse.successfulShards(), equalTo(1));
});
}
        internalCluster().startDataOnlyNode();
        MockTransportService transportService = (MockTransportService) internalCluster().getInstance(TransportService.class, source);
        Semaphore failRecovery = new Semaphore(1);

@@ -587,10 +576,11 @@ public class ReplicaShardAllocatorIT extends OpenSearchIntegTestCase {
        transportService.clearAllRules();
    }

-   private void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception {
+   public static void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception {
+       final ClusterService clusterService = internalCluster().clusterService();
        assertBusy(() -> {
            Index index = resolveIndex(indexName);
-           Set<String> activeRetentionLeaseIds = clusterService().state()
+           Set<String> activeRetentionLeaseIds = clusterService.state()
                .routingTable()
                .index(index)
                .shard(0)

IndexShardIT.java

@@ -38,7 +38,6 @@ import org.opensearch.Version;
import org.opensearch.action.ActionListener;
import org.opensearch.action.admin.cluster.node.stats.NodeStats;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
-import org.opensearch.action.admin.indices.stats.IndexStats;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;

@@ -179,24 +178,6 @@ public class IndexShardIT extends OpenSearchSingleNodeTestCase {
        }
    }
public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
assertAcked(
client().admin()
.indices()
.prepareCreate("test")
.setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))
);
client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get();
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
assertBusy(() -> {
IndexStats indexStats = client().admin().indices().prepareStats("test").clear().setTranslog(true).get().getIndex("test");
assertThat(indexStats.getTotal().translog.getUncommittedOperations(), equalTo(0));
indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
});
}
    public void testDurableFlagHasEffect() throws Exception {
        createIndex("test");
        ensureGreen();

FlushIT.java (file deleted)

@@ -1,495 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.indices.flush;
import org.apache.lucene.index.Term;
import org.opensearch.action.ActionListener;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.action.admin.indices.flush.SyncedFlushResponse;
import org.opensearch.action.admin.indices.stats.IndexStats;
import org.opensearch.action.admin.indices.stats.ShardStats;
import org.opensearch.action.support.ActiveShardCount;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.opensearch.common.UUIDs;
import org.opensearch.common.ValidationException;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.ByteSizeUnit;
import org.opensearch.common.unit.ByteSizeValue;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.index.Index;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.engine.InternalEngine;
import org.opensearch.index.engine.InternalEngineTests;
import org.opensearch.index.mapper.ParsedDocument;
import org.opensearch.index.mapper.Uid;
import org.opensearch.index.seqno.SequenceNumbers;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.IndexShardTestCase;
import org.opensearch.index.shard.ShardId;
import org.opensearch.indices.IndexingMemoryController;
import org.opensearch.indices.IndicesService;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.InternalSettingsPlugin;
import org.opensearch.test.InternalTestCluster;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyArray;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
public class FlushIT extends OpenSearchIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(InternalSettingsPlugin.class);
}
public void testWaitIfOngoing() throws InterruptedException {
createIndex("test");
ensureGreen("test");
final int numIters = scaledRandomIntBetween(10, 30);
for (int i = 0; i < numIters; i++) {
for (int j = 0; j < 10; j++) {
client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get();
}
final CountDownLatch latch = new CountDownLatch(10);
final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>();
for (int j = 0; j < 10; j++) {
client().admin().indices().prepareFlush("test").execute(new ActionListener<FlushResponse>() {
@Override
public void onResponse(FlushResponse flushResponse) {
try {
// don't use assertAllSuccessful; it uses a randomized context that belongs to a different thread
assertThat(
"Unexpected ShardFailures: " + Arrays.toString(flushResponse.getShardFailures()),
flushResponse.getFailedShards(),
equalTo(0)
);
latch.countDown();
} catch (Exception ex) {
onFailure(ex);
}
}
@Override
public void onFailure(Exception e) {
errors.add(e);
latch.countDown();
}
});
}
latch.await();
assertThat(errors, emptyIterable());
}
}
public void testRejectIllegalFlushParameters() {
createIndex("test");
int numDocs = randomIntBetween(0, 10);
for (int i = 0; i < numDocs; i++) {
client().prepareIndex("test", "_doc").setSource("{}", XContentType.JSON).get();
}
assertThat(
expectThrows(
ValidationException.class,
() -> client().admin().indices().flush(new FlushRequest().force(true).waitIfOngoing(false)).actionGet()
).getMessage(),
containsString("wait_if_ongoing must be true for a force flush")
);
assertThat(
client().admin().indices().flush(new FlushRequest().force(true).waitIfOngoing(true)).actionGet().getShardFailures(),
emptyArray()
);
assertThat(
client().admin().indices().flush(new FlushRequest().force(false).waitIfOngoing(randomBoolean())).actionGet().getShardFailures(),
emptyArray()
);
}
public void testSyncedFlush() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)).get();
ensureGreen();
final Index index = client().admin().cluster().prepareState().get().getState().metadata().index("test").getIndex();
IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
ShardsSyncedFlushResult result;
if (randomBoolean()) {
logger.info("--> sync flushing shard 0");
result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0));
} else {
logger.info("--> sync flushing index [test]");
SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get();
result = indicesResult.getShardsResultPerIndex().get("test").get(0);
}
assertFalse(result.failed());
assertThat(result.totalShards(), equalTo(indexStats.getShards().length));
assertThat(result.successfulShards(), equalTo(indexStats.getShards().length));
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
String syncId = result.syncId();
for (ShardStats shardStats : indexStats.getShards()) {
final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
assertThat(shardSyncId, equalTo(syncId));
}
// now, start a new node, relocate a shard there, and see if the sync id is still there
String newNodeName = internalCluster().startNode();
ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next();
String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName();
assertFalse(currentNodeName.equals(newNodeName));
internalCluster().client()
.admin()
.cluster()
.prepareReroute()
.add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName))
.get();
client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).get();
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
client().admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build())
.get();
ensureGreen("test");
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
client().admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build())
.get();
ensureGreen("test");
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
}
public void testSyncedFlushWithConcurrentIndexing() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(3);
createIndex("test");
client().admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(
Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
.put("index.refresh_interval", -1)
.put("index.number_of_replicas", internalCluster().numDataNodes() - 1)
)
.get();
ensureGreen();
final AtomicBoolean stop = new AtomicBoolean(false);
final AtomicInteger numDocs = new AtomicInteger(0);
Thread indexingThread = new Thread() {
@Override
public void run() {
while (stop.get() == false) {
client().prepareIndex().setIndex("test").setType("_doc").setSource("{}", XContentType.JSON).get();
numDocs.incrementAndGet();
}
}
};
indexingThread.start();
IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
for (ShardStats shardStats : indexStats.getShards()) {
assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
logger.info("--> trying sync flush");
SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get();
logger.info("--> sync flush done");
stop.set(true);
indexingThread.join();
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
assertFlushResponseEqualsShardStats(indexStats.getShards(), syncedFlushResult.getShardsResultPerIndex().get("test"));
refresh();
assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get()));
logger.info("indexed {} docs", client().prepareSearch().setSize(0).get().getHits().getTotalHits().value);
logClusterState();
internalCluster().fullRestart();
ensureGreen();
assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get()));
}
private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List<ShardsSyncedFlushResult> syncedFlushResults) {
for (final ShardStats shardStats : shardsStats) {
for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) {
if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) {
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> singleResponse : shardResult.shardResponses()
.entrySet()) {
if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) {
if (singleResponse.getValue().success()) {
logger.info(
"{} sync flushed on node {}",
singleResponse.getKey().shardId(),
singleResponse.getKey().currentNodeId()
);
assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
} else {
logger.info(
"{} sync flush failed for on node {}",
singleResponse.getKey().shardId(),
singleResponse.getKey().currentNodeId()
);
assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
}
}
}
}
}
}
public void testUnallocatedShardsDoesNotHang() throws InterruptedException {
// create an index but disallow allocation
prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE)
.setSettings(Settings.builder().put("index.routing.allocation.include._name", "nonexistent"))
.get();
// this should not hang but instead immediately return with empty result set
List<ShardsSyncedFlushResult> shardsResult = client().admin()
.indices()
.prepareSyncedFlush("test")
.get()
.getShardsResultPerIndex()
.get("test");
// just to make sure the test actually tests the right thing
int numShards = client().admin()
.indices()
.prepareGetSettings("test")
.get()
.getIndexToSettings()
.get("test")
.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, -1);
assertThat(shardsResult.size(), equalTo(numShards));
assertThat(shardsResult.get(0).failureReason(), equalTo("no active shards"));
}
private void indexDoc(Engine engine, String id) throws IOException {
final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null);
final Engine.IndexResult indexResult = engine.index(
new Engine.Index(
new Term("_id", Uid.encodeId(doc.id())),
doc,
((InternalEngine) engine).getProcessedLocalCheckpoint() + 1,
1L,
1L,
null,
Engine.Operation.Origin.REPLICA,
System.nanoTime(),
-1L,
false,
SequenceNumbers.UNASSIGNED_SEQ_NO,
0
)
);
assertThat(indexResult.getFailure(), nullValue());
engine.syncTranslog();
}
public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
final int numberOfReplicas = internalCluster().numDataNodes() - 1;
assertAcked(
prepareCreate("test").setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
).get()
);
ensureGreen();
final Index index = clusterService().state().metadata().index("test").getIndex();
final ShardId shardId = new ShardId(index, 0);
final int numDocs = between(1, 10);
for (int i = 0; i < numDocs; i++) {
index("test", "doc", Integer.toString(i));
}
final List<IndexShard> indexShards = internalCluster().nodesInclude("test")
.stream()
.map(node -> internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId))
.collect(Collectors.toList());
// Index extra documents to one replica - synced-flush should fail on that replica.
final IndexShard outOfSyncReplica = randomValueOtherThanMany(s -> s.routingEntry().primary(), () -> randomFrom(indexShards));
final int extraDocs = between(1, 10);
for (int i = 0; i < extraDocs; i++) {
indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i);
}
final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1));
assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas));
assertThat(
partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason,
equalTo(
"ongoing indexing operations: num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]"
)
);
// Index extra documents to all shards - synced-flush should be ok.
for (IndexShard indexShard : indexShards) {
// Do not reindex documents to the out-of-sync replica, to avoid triggering merges
if (indexShard != outOfSyncReplica) {
for (int i = 0; i < extraDocs; i++) {
indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i);
}
}
}
final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1));
assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
}
public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
final int numberOfReplicas = internalCluster().numDataNodes() - 1;
assertAcked(
prepareCreate("test").setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
).get()
);
ensureGreen();
final Index index = clusterService().state().metadata().index("test").getIndex();
final ShardId shardId = new ShardId(index, 0);
final int numDocs = between(1, 10);
for (int i = 0; i < numDocs; i++) {
index("test", "doc", Integer.toString(i));
}
final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1));
// Do not renew synced-flush
final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1));
assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId()));
// Shards were updated, renew synced flush.
final int moreDocs = between(1, 10);
for (int i = 0; i < moreDocs; i++) {
index("test", "doc", "more-" + i);
}
final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1));
assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId())));
// Manually remove or change sync-id, renew synced flush.
IndexShard shard = internalCluster().getInstance(IndicesService.class, randomFrom(internalCluster().nodesInclude("test")))
.getShardOrNull(shardId);
if (randomBoolean()) {
// Change the existing sync-id of a single shard.
shard.syncFlush(UUIDs.randomBase64UUID(random()), shard.commitStats().getRawCommitId());
assertThat(shard.commitStats().syncId(), not(equalTo(thirdSeal.syncId())));
} else {
// Flush will create a new commit without sync-id
shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true));
assertThat(shard.commitStats().syncId(), nullValue());
}
final ShardsSyncedFlushResult fourthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(fourthSeal.successfulShards(), equalTo(numberOfReplicas + 1));
assertThat(fourthSeal.syncId(), not(equalTo(thirdSeal.syncId())));
}
public void testFlushOnInactive() throws Exception {
final String indexName = "flush_on_inactive";
List<String> dataNodes = internalCluster().startDataOnlyNodes(
2,
Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), randomTimeValue(10, 1000, "ms")).build()
);
assertAcked(
client().admin()
.indices()
.prepareCreate(indexName)
.setSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(200, 500, "ms"))
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), randomTimeValue(50, 200, "ms"))
.put("index.routing.allocation.include._name", String.join(",", dataNodes))
.build()
)
);
ensureGreen(indexName);
int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
client().prepareIndex(indexName, "_doc").setSource("f", "v").get();
}
if (randomBoolean()) {
internalCluster().restartNode(randomFrom(dataNodes), new InternalTestCluster.RestartCallback());
ensureGreen(indexName);
}
assertBusy(() -> {
for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getShards()) {
assertThat(shardStats.getStats().getTranslog().getUncommittedOperations(), equalTo(0));
}
}, 30, TimeUnit.SECONDS);
}
}
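Migration note for the deleted test class above: where a test sealed shards with prepareSyncedFlush, a plain flush now gives the same durability guarantee. A minimal sketch, assuming the integration-test helpers used in FlushIT (client(), assertThat, equalTo); the index name "test" is illustrative:

// Flush and assert that every shard copy committed. This replaces the old
// check that the same sync id was written to the primary and all replicas.
FlushResponse flushResponse = client().admin().indices().prepareFlush("test").setForce(false).setWaitIfOngoing(true).get();
assertThat("Unexpected ShardFailures: " + Arrays.toString(flushResponse.getShardFailures()), flushResponse.getFailedShards(), equalTo(0));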


@@ -81,6 +81,7 @@ import org.opensearch.common.unit.ByteSizeValue;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.gateway.ReplicaShardAllocatorIT;
import org.opensearch.index.Index;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
@@ -100,7 +101,6 @@ import org.opensearch.index.store.StoreStats;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.NodeIndicesStats;
import org.opensearch.indices.analysis.AnalysisModule;
import org.opensearch.indices.flush.SyncedFlushUtil;
import org.opensearch.indices.recovery.RecoveryState.Stage;
import org.opensearch.node.NodeClosedException;
import org.opensearch.node.RecoverySettingsChunkSizePlugin;
@@ -138,7 +138,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
@@ -148,7 +147,6 @@ import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static java.util.Collections.singletonMap;
@@ -403,7 +401,23 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
final String nodeA = internalCluster().startNode();
logger.info("--> create index on node: {}", nodeA);
createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT).getShards()[0].getStats().getStore().size();
createIndex(
INDEX_NAME,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
.put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")
.build()
);
int numDocs = randomIntBetween(10, 200);
final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(INDEX_NAME, INDEX_TYPE)
.setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat());
}
indexRandom(randomBoolean(), docs);
logger.info("--> start node B"); logger.info("--> start node B");
// force a shard recovery from nodeA to nodeB // force a shard recovery from nodeA to nodeB
@ -425,8 +439,7 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
logger.info("--> start node C"); logger.info("--> start node C");
final String nodeC = internalCluster().startNode(); final String nodeC = internalCluster().startNode();
// do sync flush to gen sync id ReplicaShardAllocatorIT.ensureActivePeerRecoveryRetentionLeasesAdvanced(INDEX_NAME);
assertThat(client().admin().indices().prepareSyncedFlush(INDEX_NAME).get().failedShards(), equalTo(0));
// hold peer recovery on phase 2 after nodeB down // hold peer recovery on phase 2 after nodeB down
CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); CountDownLatch phase1ReadyBlocked = new CountDownLatch(1);
@ -1524,93 +1537,6 @@ public class IndexRecoveryIT extends OpenSearchIntegTestCase {
ensureGreen(indexName); ensureGreen(indexName);
} }
public void testRecoveryFlushReplica() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(3);
String indexName = "test-index";
createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build());
int numDocs = randomIntBetween(0, 10);
indexRandom(
randomBoolean(),
false,
randomBoolean(),
IntStream.range(0, numDocs).mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())
);
assertAcked(
client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.number_of_replicas", 1))
);
ensureGreen(indexName);
ShardId shardId = null;
for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) {
shardId = shardStats.getShardRouting().shardId();
if (shardStats.getShardRouting().primary() == false) {
assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs));
SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(
shardStats.getCommitStats().getUserData().entrySet()
);
assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint()));
assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo()));
}
}
SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0)));
assertAcked(
client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.number_of_replicas", 2))
);
ensureGreen(indexName);
// Recovery should keep syncId if no indexing activity on the primary after synced-flush.
Set<String> syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards())
.map(shardStats -> shardStats.getCommitStats().syncId())
.collect(Collectors.toSet());
assertThat(syncIds, hasSize(1));
}
public void testRecoveryUsingSyncedFlushWithoutRetentionLease() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
String indexName = "test-index";
createIndex(
indexName,
Settings.builder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") // do not reallocate the lost shard
.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "100ms") // expire leases quickly
.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") // sync frequently
.build()
);
int numDocs = randomIntBetween(0, 10);
indexRandom(
randomBoolean(),
false,
randomBoolean(),
IntStream.range(0, numDocs).mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())
);
ensureGreen(indexName);
final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
assertThat(SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId).successfulShards(), equalTo(2));
final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
final ShardRouting shardToResync = randomFrom(clusterState.routingTable().shardRoutingTable(shardId).activeShards());
internalCluster().restartNode(
clusterState.nodes().get(shardToResync.currentNodeId()).getName(),
new InternalTestCluster.RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
assertBusy(
() -> assertFalse(
client().admin().indices().prepareStats(indexName).get().getShards()[0].getRetentionLeaseStats()
.retentionLeases()
.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardToResync))
)
);
return super.onNodeStopped(nodeName);
}
}
);
ensureGreen(indexName);
}
public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
List<String> nodes = randomSubsetOf(
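The recovery test above now waits for peer-recovery retention leases instead of planting a sync id. A condensed before/after sketch, using only names that appear in this diff (INDEX_NAME and ensureActivePeerRecoveryRetentionLeasesAdvanced):

// Before: seal the shards so recovery could skip the file-copy phase.
// assertThat(client().admin().indices().prepareSyncedFlush(INDEX_NAME).get().failedShards(), equalTo(0));
// After: wait until every active copy holds an advanced peer-recovery
// retention lease, which gives recoveries the same skip guarantee.
ReplicaShardAllocatorIT.ensureActivePeerRecoveryRetentionLeasesAdvanced(INDEX_NAME);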


@@ -484,11 +484,7 @@ public class CloseIndexIT extends OpenSearchIntegTestCase {
.collect(toList())
);
ensureGreen(indexName);
if (randomBoolean()) {
client().admin().indices().prepareFlush(indexName).get();
} else {
client().admin().indices().prepareSyncedFlush(indexName).get();
}
// index more documents while one shard copy is offline
internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() {
@Override


@@ -140,9 +140,7 @@ import org.opensearch.action.admin.indices.exists.indices.TransportIndicesExists
import org.opensearch.action.admin.indices.exists.types.TransportTypesExistsAction;
import org.opensearch.action.admin.indices.exists.types.TypesExistsAction;
import org.opensearch.action.admin.indices.flush.FlushAction;
import org.opensearch.action.admin.indices.flush.SyncedFlushAction;
import org.opensearch.action.admin.indices.flush.TransportFlushAction;
import org.opensearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.opensearch.action.admin.indices.forcemerge.TransportForceMergeAction;
import org.opensearch.action.admin.indices.get.GetIndexAction;
@@ -589,7 +587,6 @@ public class ActionModule extends AbstractModule {
actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class);
actions.register(FlushAction.INSTANCE, TransportFlushAction.class);
actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);


@@ -1,45 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.action.admin.indices.flush;
import org.opensearch.action.ActionType;
public class SyncedFlushAction extends ActionType<SyncedFlushResponse> {
public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
public static final String NAME = "indices:admin/synced_flush";
private SyncedFlushAction() {
super(NAME, SyncedFlushResponse::new);
}
}


@@ -1,69 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.action.admin.indices.flush;
import org.opensearch.action.support.broadcast.BroadcastRequest;
import org.opensearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.util.Arrays;
/**
* A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
* and writes the same sync id to primary and all copies.
*
* <p>Best created with {@link org.opensearch.client.Requests#syncedFlushRequest(String...)}. </p>
*
* @see org.opensearch.client.Requests#flushRequest(String...)
* @see org.opensearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
* @see SyncedFlushResponse
*/
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {
/**
* Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
* be sync flushed.
*/
public SyncedFlushRequest(String... indices) {
super(indices);
}
public SyncedFlushRequest(StreamInput in) throws IOException {
super(in);
}
@Override
public String toString() {
return "SyncedFlushRequest{" + "indices=" + Arrays.toString(indices) + "}";
}
}
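With the request class above removed, the retained FlushRequest covers the same call sites. A minimal sketch, assuming a Client instance named client; the index names are illustrative:

// Build and execute a plain flush against the indices a synced flush used
// to target; force=false plus waitIfOngoing=true mirrors the old behavior.
FlushRequest request = Requests.flushRequest("index-1", "index-2").force(false).waitIfOngoing(true);
FlushResponse response = client.admin().indices().flush(request).actionGet();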


@@ -1,54 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.action.admin.indices.flush;
import org.opensearch.action.ActionRequestBuilder;
import org.opensearch.action.support.IndicesOptions;
import org.opensearch.client.OpenSearchClient;
public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse> {
public SyncedFlushRequestBuilder(OpenSearchClient client, SyncedFlushAction action) {
super(client, action, new SyncedFlushRequest());
}
public SyncedFlushRequestBuilder setIndices(String[] indices) {
super.request().indices(indices);
return this;
}
public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
super.request().indicesOptions(indicesOptions);
return this;
}
}
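The builder-style entry point maps onto the retained flush builder. A sketch under the same assumptions (a Client named client, illustrative index name):

// prepareFlush returns a FlushRequestBuilder, the direct counterpart of
// the removed SyncedFlushRequestBuilder shown above.
FlushResponse response = client.admin().indices().prepareFlush("my-index").setForce(false).setWaitIfOngoing(true).get();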


@@ -1,226 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.action.admin.indices.flush;
import org.opensearch.action.ActionResponse;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;
import org.opensearch.common.util.iterable.Iterables;
import org.opensearch.common.xcontent.ToXContentFragment;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.indices.flush.ShardsSyncedFlushResult;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.unmodifiableMap;
/**
* The result of performing a sync flush operation on all shards of multiple indices
*/
public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment {
private final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
private final ShardCounts shardCounts;
public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
// shardsResultPerIndex is never modified after it is passed to this
// constructor so this is safe even though shardsResultPerIndex is a
// ConcurrentHashMap
this.shardsResultPerIndex = unmodifiableMap(shardsResultPerIndex);
this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
}
public SyncedFlushResponse(StreamInput in) throws IOException {
super(in);
shardCounts = new ShardCounts(in);
Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
int numShardsResults = in.readInt();
for (int i = 0; i < numShardsResults; i++) {
String index = in.readString();
List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
int numShards = in.readInt();
for (int j = 0; j < numShards; j++) {
shardsSyncedFlushResults.add(new ShardsSyncedFlushResult(in));
}
tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
}
shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
}
/**
* total number of shards, including replicas, both assigned and unassigned
*/
public int totalShards() {
return shardCounts.total;
}
/**
* total number of shards for which the operation failed
*/
public int failedShards() {
return shardCounts.failed;
}
/**
* total number of shards which were successfully sync-flushed
*/
public int successfulShards() {
return shardCounts.successful;
}
public RestStatus restStatus() {
return failedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT;
}
public Map<String, List<ShardsSyncedFlushResult>> getShardsResultPerIndex() {
return shardsResultPerIndex;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields._SHARDS);
shardCounts.toXContent(builder, params);
builder.endObject();
for (Map.Entry<String, List<ShardsSyncedFlushResult>> indexEntry : shardsResultPerIndex.entrySet()) {
List<ShardsSyncedFlushResult> indexResult = indexEntry.getValue();
builder.startObject(indexEntry.getKey());
ShardCounts indexShardCounts = calculateShardCounts(indexResult);
indexShardCounts.toXContent(builder, params);
if (indexShardCounts.failed > 0) {
builder.startArray(Fields.FAILURES);
for (ShardsSyncedFlushResult shardResults : indexResult) {
if (shardResults.failed()) {
builder.startObject();
builder.field(Fields.SHARD, shardResults.shardId().id());
builder.field(Fields.REASON, shardResults.failureReason());
builder.endObject();
continue;
}
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
builder.startObject();
builder.field(Fields.SHARD, shardResults.shardId().id());
builder.field(Fields.REASON, shardEntry.getValue().failureReason());
builder.field(Fields.ROUTING, shardEntry.getKey());
builder.endObject();
}
}
builder.endArray();
}
builder.endObject();
}
return builder;
}
static ShardCounts calculateShardCounts(Iterable<ShardsSyncedFlushResult> results) {
int total = 0, successful = 0, failed = 0;
for (ShardsSyncedFlushResult result : results) {
total += result.totalShards();
successful += result.successfulShards();
if (result.failed()) {
// treat all shard copies as failed
failed += result.totalShards();
} else {
// some shards may have failed during the sync phase
failed += result.failedShards().size();
}
}
return new ShardCounts(total, successful, failed);
}
static final class ShardCounts implements ToXContentFragment, Writeable {
public final int total;
public final int successful;
public final int failed;
ShardCounts(int total, int successful, int failed) {
this.total = total;
this.successful = successful;
this.failed = failed;
}
ShardCounts(StreamInput in) throws IOException {
total = in.readInt();
successful = in.readInt();
failed = in.readInt();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOTAL, total);
builder.field(Fields.SUCCESSFUL, successful);
builder.field(Fields.FAILED, failed);
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(total);
out.writeInt(successful);
out.writeInt(failed);
}
}
static final class Fields {
static final String _SHARDS = "_shards";
static final String TOTAL = "total";
static final String SUCCESSFUL = "successful";
static final String FAILED = "failed";
static final String FAILURES = "failures";
static final String SHARD = "shard";
static final String ROUTING = "routing";
static final String REASON = "reason";
}
@Override
public void writeTo(StreamOutput out) throws IOException {
shardCounts.writeTo(out);
out.writeInt(shardsResultPerIndex.size());
for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
out.writeString(entry.getKey());
out.writeInt(entry.getValue().size());
for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
shardsSyncedFlushResult.writeTo(out);
}
}
}
}
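For reference, this is roughly how callers walked the removed response before migrating; a sketch assuming a SyncedFlushResponse named resp and a logger, both taken from the old call sites:

// Report the failure reason for each shard group that did not get a sync
// id, mirroring the toXContent logic in the class above.
for (Map.Entry<String, List<ShardsSyncedFlushResult>> e : resp.getShardsResultPerIndex().entrySet()) {
    for (ShardsSyncedFlushResult shardResult : e.getValue()) {
        if (shardResult.failed()) {
            logger.info("{}[{}]: {}", e.getKey(), shardResult.shardId().id(), shardResult.failureReason());
        }
    }
}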


@@ -32,6 +32,7 @@
package org.opensearch.action.admin.indices.flush;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
import org.opensearch.action.support.ActionFilters;
import org.opensearch.action.support.replication.ReplicationResponse;
@@ -40,10 +41,16 @@ import org.opensearch.cluster.action.shard.ShardStateAction;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.inject.Inject;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.ShardId;
import org.opensearch.indices.IndicesService;
import org.opensearch.tasks.Task;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.TransportChannel;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestHandler;
import org.opensearch.transport.TransportService;
import java.io.IOException;
@@ -75,6 +82,12 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
ShardFlushRequest::new,
ThreadPool.Names.FLUSH
);
transportService.registerRequestHandler(
PRE_SYNCED_FLUSH_ACTION_NAME,
ThreadPool.Names.FLUSH,
PreShardSyncedFlushRequest::new,
new PreSyncedFlushTransportHandler(indicesService)
);
}
@Override
@@ -103,4 +116,43 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
return new ReplicaResult();
});
}
// TODO: Remove this transition in OpenSearch 3.0
private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre";
private static class PreShardSyncedFlushRequest extends TransportRequest {
private final ShardId shardId;
private PreShardSyncedFlushRequest(StreamInput in) throws IOException {
super(in);
assert in.getVersion().before(Version.V_2_0_0) : "received pre_sync request from a new node";
this.shardId = new ShardId(in);
}
@Override
public String toString() {
return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}';
}
@Override
public void writeTo(StreamOutput out) throws IOException {
assert false : "must not send pre_sync request from a new node";
throw new UnsupportedOperationException("");
}
}
private static final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> {
private final IndicesService indicesService;
PreSyncedFlushTransportHandler(IndicesService indicesService) {
this.indicesService = indicesService;
}
@Override
public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId.getIndex()).getShard(request.shardId.id());
indexShard.flush(new FlushRequest().force(false).waitIfOngoing(true));
throw new UnsupportedOperationException("Synced flush was removed and a normal flush was performed instead.");
}
}
}


@@ -1,64 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.action.admin.indices.flush;
import org.opensearch.action.ActionListener;
import org.opensearch.action.support.ActionFilters;
import org.opensearch.action.support.HandledTransportAction;
import org.opensearch.common.inject.Inject;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.tasks.Task;
import org.opensearch.transport.TransportService;
/**
* Synced flush ActionType.
*/
public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {
SyncedFlushService syncedFlushService;
@Inject
public TransportSyncedFlushAction(
TransportService transportService,
ActionFilters actionFilters,
SyncedFlushService syncedFlushService
) {
super(SyncedFlushAction.NAME, transportService, actionFilters, SyncedFlushRequest::new);
this.syncedFlushService = syncedFlushService;
}
@Override
protected void doExecute(Task task, SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
}
}


@@ -66,9 +66,6 @@ import org.opensearch.action.admin.indices.exists.types.TypesExistsResponse;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushRequestBuilder;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
import org.opensearch.action.admin.indices.flush.SyncedFlushResponse;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
@@ -433,29 +430,6 @@ public interface IndicesAdminClient extends OpenSearchClient {
*/
FlushRequestBuilder prepareFlush(String... indices);
/**
* Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
*
* @param request The sync flush request
* @return A result future
* @see org.opensearch.client.Requests#syncedFlushRequest(String...)
*/
ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);
/**
* Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
*
* @param request The sync flush request
* @param listener A listener to be notified with a result
* @see org.opensearch.client.Requests#syncedFlushRequest(String...)
*/
void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);
/**
* Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
*/
SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);
/**
* Explicitly force merge one or more indices into the given number of segments.
*
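Callers of the removed asynchronous variants can move to the retained flush overloads on the same interface. A minimal sketch, assuming an IndicesAdminClient named indices and an illustrative index name:

// Listener-based flush, the replacement for
// syncedFlush(SyncedFlushRequest, ActionListener<SyncedFlushResponse>).
indices.flush(new FlushRequest("my-index"), new ActionListener<FlushResponse>() {
    @Override
    public void onResponse(FlushResponse response) {
        // inspect response.getFailedShards() here
    }

    @Override
    public void onFailure(Exception e) {
        // handle the error
    }
});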


@@ -61,7 +61,6 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.opensearch.action.admin.indices.open.OpenIndexRequest;
@@ -274,17 +273,6 @@ public class Requests {
return new FlushRequest(indices);
}
/**
* Creates a synced flush indices request.
*
* @param indices The indices to sync flush. Use {@code null} or {@code _all} to execute against all indices
* @return The synced flush request
* @see org.opensearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
*/
public static SyncedFlushRequest syncedFlushRequest(String... indices) {
return new SyncedFlushRequest(indices);
}
/**
* Creates a force merge request.
*


@@ -200,10 +200,6 @@ import org.opensearch.action.admin.indices.flush.FlushAction;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushRequestBuilder;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.action.admin.indices.flush.SyncedFlushAction;
import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
import org.opensearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
import org.opensearch.action.admin.indices.flush.SyncedFlushResponse;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
@@ -1517,21 +1513,6 @@ public abstract class AbstractClient implements Client {
return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices);
}
@Override
public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
return execute(SyncedFlushAction.INSTANCE, request);
}
@Override
public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
execute(SyncedFlushAction.INSTANCE, request, listener);
}
@Override
public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
}
@Override
public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
execute(GetMappingsAction.INSTANCE, request, listener);


@@ -135,21 +135,6 @@ final class CompositeIndexEventListener implements IndexEventListener {
}
}
@Override
public void onShardInactive(IndexShard indexShard) {
for (IndexEventListener listener : listeners) {
try {
listener.onShardInactive(indexShard);
} catch (Exception e) {
logger.warn(
() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()),
e
);
throw e;
}
}
}
@Override
public void indexShardStateChanged(
IndexShard indexShard,


@@ -89,20 +89,6 @@ public final class CommitStats implements Writeable, ToXContentFragment {
return id;
}
/**
* A raw version of the commit id (see {@link SegmentInfos#getId()}
*/
public Engine.CommitId getRawCommitId() {
return new Engine.CommitId(Base64.getDecoder().decode(id));
}
/**
* The synced-flush id of the commit if existed.
*/
public String syncId() {
return userData.get(InternalEngine.SYNC_COMMIT_ID);
}
/**
* Returns the number of documents in this commit
*/


@@ -60,9 +60,6 @@ import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.Nullable;
import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.lease.Releasables;
import org.opensearch.common.logging.Loggers;
@@ -96,7 +93,6 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.NoSuchFileException;
import java.util.Arrays;
import java.util.Base64;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
@@ -121,7 +117,7 @@ import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
public abstract class Engine implements Closeable {
public static final String SYNC_COMMIT_ID = "sync_id"; // TODO: remove sync_id in 3.0
public static final String HISTORY_UUID_KEY = "history_uuid";
public static final String FORCE_MERGE_UUID_KEY = "force_merge_uuid";
public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no";
@@ -577,22 +573,6 @@ public abstract class Engine implements Closeable {
}
/**
* Attempts to do a special commit where the given syncId is put into the commit data. The attempt
* succeeds if there are no pending writes in lucene and the current commit point is equal to the expected one.
*
* @param syncId id of this sync
* @param expectedCommitId the expected commit id
* @return true if the sync commit was made, false otherwise
*/
public abstract SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException;
public enum SyncedFlushResult {
SUCCESS,
COMMIT_MISMATCH,
PENDING_OPERATIONS
}
protected final GetResult getFromSearcher( protected final GetResult getFromSearcher(
Get get, Get get,
BiFunction<String, SearcherScope, Engine.Searcher> searcherFactory, BiFunction<String, SearcherScope, Engine.Searcher> searcherFactory,
@ -1139,20 +1119,17 @@ public abstract class Engine implements Closeable {
* @param force if <code>true</code> a lucene commit is executed even if no changes need to be committed. * @param force if <code>true</code> a lucene commit is executed even if no changes need to be committed.
* @param waitIfOngoing if <code>true</code> this call will block until all currently running flushes have finished. * @param waitIfOngoing if <code>true</code> this call will block until all currently running flushes have finished.
* Otherwise this call will return without blocking. * Otherwise this call will return without blocking.
* @return the commit Id for the resulting commit
*/ */
public abstract CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException; public abstract void flush(boolean force, boolean waitIfOngoing) throws EngineException;
/** /**
* Flushes the state of the engine including the transaction log, clearing memory and persisting * Flushes the state of the engine including the transaction log, clearing memory and persisting
* documents in the lucene index to disk including a potentially heavy and durable fsync operation. * documents in the lucene index to disk including a potentially heavy and durable fsync operation.
* This operation is not going to block if another flush operation is currently running and won't write * This operation is not going to block if another flush operation is currently running and won't write
* a lucene commit if nothing needs to be committed. * a lucene commit if nothing needs to be committed.
*
* @return the commit Id for the resulting commit
*/ */
public final CommitId flush() throws EngineException { public final void flush() throws EngineException {
return flush(false, false); flush(false, false);
} }
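A migration note for callers, sketched under the assumption of an engine reference in scope: flush no longer reports a commit id, so code that captured and compared ids simply drops the return value.

    // Before (removed): Engine.CommitId id = engine.flush(false, true);
    // After: flush is invoked for its side effect only; recovery fast paths
    // use sequence numbers and retention leases instead of commit-id equality.
    engine.flush(false, true);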
/** /**
@ -1923,58 +1900,6 @@ public abstract class Engine implements Closeable {
} }
} }
public static class CommitId implements Writeable {
private final byte[] id;
public CommitId(byte[] id) {
assert id != null;
this.id = Arrays.copyOf(id, id.length);
}
/**
* Read from a stream.
*/
public CommitId(StreamInput in) throws IOException {
assert in != null;
this.id = in.readByteArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByteArray(id);
}
@Override
public String toString() {
return Base64.getEncoder().encodeToString(id);
}
public boolean idsEqual(byte[] id) {
return Arrays.equals(id, this.id);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
CommitId commitId = (CommitId) o;
return Arrays.equals(id, commitId.id);
}
@Override
public int hashCode() {
return Arrays.hashCode(id);
}
}
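The removed wrapper held a defensive copy of Lucene's commit id. A minimal sketch of the equivalent raw handling, assuming a segmentInfos instance in scope (java.util.Arrays and java.util.Base64 are the only imports needed):

    byte[] rawId = segmentInfos.getId();                          // Lucene's per-commit id bytes
    String printable = Base64.getEncoder().encodeToString(rawId); // what CommitId#toString produced
    byte[] decoded = Base64.getDecoder().decode(printable);       // what getRawCommitId performed
    assert Arrays.equals(rawId, decoded);                         // equality was Arrays.equals on the bytes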
public static class IndexCommitRef implements Closeable { public static class IndexCommitRef implements Closeable {
private final AtomicBoolean closed = new AtomicBoolean(); private final AtomicBoolean closed = new AtomicBoolean();
private final CheckedRunnable<IOException> onClose; private final CheckedRunnable<IOException> onClose;

View File

@ -1922,71 +1922,6 @@ public class InternalEngine extends Engine {
refresh("write indexing buffer", SearcherScope.INTERNAL, false); refresh("write indexing buffer", SearcherScope.INTERNAL, false);
} }
@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException {
// best effort attempt before we acquire locks
ensureOpen();
if (indexWriter.hasUncommittedChanges()) {
logger.trace("can't sync commit [{}]. have pending changes", syncId);
return SyncedFlushResult.PENDING_OPERATIONS;
}
if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
return SyncedFlushResult.COMMIT_MISMATCH;
}
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
ensureCanFlush();
// let's do a refresh to make sure we shrink the version map. This refresh will either be a no-op (just shrink the version map)
// or we also have uncommitted changes, in which case this syncFlush fails.
refresh("sync_flush", SearcherScope.INTERNAL, true);
if (indexWriter.hasUncommittedChanges()) {
logger.trace("can't sync commit [{}]. have pending changes", syncId);
return SyncedFlushResult.PENDING_OPERATIONS;
}
if (expectedCommitId.idsEqual(lastCommittedSegmentInfos.getId()) == false) {
logger.trace("can't sync commit [{}]. current commit id is not equal to expected.", syncId);
return SyncedFlushResult.COMMIT_MISMATCH;
}
logger.trace("starting sync commit [{}]", syncId);
commitIndexWriter(indexWriter, translog, syncId);
logger.debug("successfully sync committed. sync id [{}].", syncId);
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
return SyncedFlushResult.SUCCESS;
} catch (IOException ex) {
maybeFailEngine("sync commit", ex);
throw new EngineException(shardId, "failed to sync commit", ex);
}
}
final boolean tryRenewSyncCommit() {
boolean renewed = false;
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
ensureCanFlush();
String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
long localCheckpointOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
if (syncId != null
&& indexWriter.hasUncommittedChanges()
&& translog.estimateTotalOperationsFromMinSeq(localCheckpointOfLastCommit + 1) == 0) {
logger.trace("start renewing sync commit [{}]", syncId);
commitIndexWriter(indexWriter, translog, syncId);
logger.debug("successfully sync committed. sync id [{}].", syncId);
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
renewed = true;
}
} catch (IOException ex) {
maybeFailEngine("renew sync commit", ex);
throw new EngineException(shardId, "failed to renew sync commit", ex);
}
if (renewed) {
// refresh outside of the write lock
// we have to refresh internal reader here to ensure we release unreferenced segments.
refresh("renew sync commit", SearcherScope.INTERNAL, true);
}
return renewed;
}
@Override @Override
public boolean shouldPeriodicallyFlush() { public boolean shouldPeriodicallyFlush() {
ensureOpen(); ensureOpen();
@ -2026,7 +1961,7 @@ public class InternalEngine extends Engine {
} }
@Override @Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { public void flush(boolean force, boolean waitIfOngoing) throws EngineException {
ensureOpen(); ensureOpen();
if (force && waitIfOngoing == false) { if (force && waitIfOngoing == false) {
assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing; assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing;
@ -2034,18 +1969,16 @@ public class InternalEngine extends Engine {
"wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing
); );
} }
final byte[] newCommitId;
try (ReleasableLock lock = readLock.acquire()) { try (ReleasableLock lock = readLock.acquire()) {
ensureOpen(); ensureOpen();
if (flushLock.tryLock() == false) { if (flushLock.tryLock() == false) {
// if we can't get the lock right away we block if needed otherwise barf // if we can't get the lock right away we block if needed otherwise barf
if (waitIfOngoing) { if (waitIfOngoing == false) {
return;
}
logger.trace("waiting for in-flight flush to finish"); logger.trace("waiting for in-flight flush to finish");
flushLock.lock(); flushLock.lock();
logger.trace("acquired flush lock after blocking"); logger.trace("acquired flush lock after blocking");
} else {
return new CommitId(lastCommittedSegmentInfos.getId());
}
} else { } else {
logger.trace("acquired flush lock immediately"); logger.trace("acquired flush lock immediately");
} }
@ -2065,7 +1998,7 @@ public class InternalEngine extends Engine {
try { try {
translog.rollGeneration(); translog.rollGeneration();
logger.trace("starting commit for flush; commitTranslog=true"); logger.trace("starting commit for flush; commitTranslog=true");
commitIndexWriter(indexWriter, translog, null); commitIndexWriter(indexWriter, translog);
logger.trace("finished commit for flush"); logger.trace("finished commit for flush");
// a temporary debugging to investigate test failure - issue#32827. Remove when the issue is resolved // a temporary debugging to investigate test failure - issue#32827. Remove when the issue is resolved
@ -2088,7 +2021,6 @@ public class InternalEngine extends Engine {
refreshLastCommittedSegmentInfos(); refreshLastCommittedSegmentInfos();
} }
newCommitId = lastCommittedSegmentInfos.getId();
} catch (FlushFailedEngineException ex) { } catch (FlushFailedEngineException ex) {
maybeFailEngine("flush", ex); maybeFailEngine("flush", ex);
throw ex; throw ex;
@ -2101,7 +2033,6 @@ public class InternalEngine extends Engine {
if (engineConfig.isEnableGcDeletes()) { if (engineConfig.isEnableGcDeletes()) {
pruneDeletedTombstones(); pruneDeletedTombstones();
} }
return new CommitId(newCommitId);
} }
private void refreshLastCommittedSegmentInfos() { private void refreshLastCommittedSegmentInfos() {
@ -2273,10 +2204,8 @@ public class InternalEngine extends Engine {
this.forceMergeUUID = forceMergeUUID; this.forceMergeUUID = forceMergeUUID;
} }
if (flush) { if (flush) {
if (tryRenewSyncCommit() == false) {
flush(false, true); flush(false, true);
} }
}
if (upgrade) { if (upgrade) {
logger.info("finished segment upgrade"); logger.info("finished segment upgrade");
} }
@ -2664,16 +2593,10 @@ public class InternalEngine extends Engine {
@Override @Override
protected void doRun() { protected void doRun() {
// if we have no pending merges and we are supposed to flush once merges have finished // if we have no pending merges and we are supposed to flush once merges have finished to
// we try to renew a sync commit which is the case when we are having a big merge after we
// are inactive. If that didn't work we go and do a real flush which is ok since it only doesn't work
// if we either have records in the translog or if we don't have a sync ID at all...
// maybe even more important, we flush after all merges finish and we are inactive indexing-wise to
// free up transient disk usage of the (presumably biggish) segments that were just merged // free up transient disk usage of the (presumably biggish) segments that were just merged
if (tryRenewSyncCommit() == false) {
flush(); flush();
} }
}
}); });
} else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) { } else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) {
// we hit a significant merge which would allow us to free up memory if we'd commit it hence on the next change // we hit a significant merge which would allow us to free up memory if we'd commit it hence on the next change
@ -2709,10 +2632,8 @@ public class InternalEngine extends Engine {
* *
* @param writer the index writer to commit * @param writer the index writer to commit
* @param translog the translog * @param translog the translog
* @param syncId the sync flush ID ({@code null} if not committing a synced flush)
* @throws IOException if an I/O exception occurs committing the specified writer
*/ */
protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { protected void commitIndexWriter(final IndexWriter writer, final Translog translog) throws IOException {
ensureCanFlush(); ensureCanFlush();
try { try {
final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
@ -2729,9 +2650,6 @@ public class InternalEngine extends Engine {
final Map<String, String> commitData = new HashMap<>(7); final Map<String, String> commitData = new HashMap<>(7);
commitData.put(Translog.TRANSLOG_UUID_KEY, translog.getTranslogUUID()); commitData.put(Translog.TRANSLOG_UUID_KEY, translog.getTranslogUUID());
commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint));
if (syncId != null) {
commitData.put(Engine.SYNC_COMMIT_ID, syncId);
}
commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo()));
commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get()));
commitData.put(HISTORY_UUID_KEY, historyUUID); commitData.put(HISTORY_UUID_KEY, historyUUID);
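Taken together, the commit user data written at flush time now looks roughly like the sketch below; the keys are the constants visible in this hunk, the values are illustrative locals, and the sync_id entry is gone:

    final Map<String, String> commitData = new HashMap<>(7);
    commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID);
    commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint));
    commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo));
    commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp));
    commitData.put(HISTORY_UUID_KEY, historyUUID);
    // Engine.SYNC_COMMIT_ID ("sync_id") is intentionally no longer written here.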

View File

@ -429,15 +429,7 @@ public class ReadOnlyEngine extends Engine {
} }
@Override @Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) { public void flush(boolean force, boolean waitIfOngoing) throws EngineException {}
// we can't do synced flushes this would require an indexWriter which we don't have
throw new UnsupportedOperationException("syncedFlush is not supported on a read-only engine");
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
return new CommitId(lastCommittedSegmentInfos.getId());
}
@Override @Override
public void forceMerge( public void forceMerge(

View File

@ -101,13 +101,6 @@ public interface IndexEventListener {
@Nullable String reason @Nullable String reason
) {} ) {}
/**
* Called when a shard is marked as inactive
*
* @param indexShard The shard that was marked inactive
*/
default void onShardInactive(IndexShard indexShard) {}
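The inactivity hook moves off the listener interface entirely; its replacement is the shard-local method introduced in the IndexShard hunk below. A minimal sketch, with an illustrative threshold:

    // The shard now flushes itself when idle instead of notifying listeners
    // such as the (deleted) SyncedFlushService.
    long inactiveTimeNS = TimeValue.timeValueMinutes(5).nanos(); // illustrative value
    indexShard.flushOnIdle(inactiveTimeNS); // schedules an async flush if idle long enough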
/** /**
* Called before the index gets created. Note that this is also called * Called before the index gets created. Note that this is also called
* when the index is created on data nodes * when the index is created on data nodes

View File

@ -1317,19 +1317,12 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
return getEngine().completionStats(fields); return getEngine().completionStats(fields);
} }
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
verifyNotClosed();
logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId);
return getEngine().syncFlush(syncId, expectedCommitId);
}
/** /**
* Executes the given flush request against the engine. * Executes the given flush request against the engine.
* *
* @param request the flush request * @param request the flush request
* @return the commit ID
*/ */
public Engine.CommitId flush(FlushRequest request) { public void flush(FlushRequest request) {
final boolean waitIfOngoing = request.waitIfOngoing(); final boolean waitIfOngoing = request.waitIfOngoing();
final boolean force = request.force(); final boolean force = request.force();
logger.trace("flush with {}", request); logger.trace("flush with {}", request);
@ -1340,9 +1333,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
*/ */
verifyNotClosed(); verifyNotClosed();
final long time = System.nanoTime(); final long time = System.nanoTime();
final Engine.CommitId commitId = getEngine().flush(force, waitIfOngoing); getEngine().flush(force, waitIfOngoing);
flushMetric.inc(System.nanoTime() - time); flushMetric.inc(System.nanoTime() - time);
return commitId;
} }
/** /**
@ -1966,7 +1958,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
onNewEngine(newEngine); onNewEngine(newEngine);
currentEngineReference.set(newEngine); currentEngineReference.set(newEngine);
// We set active because we are now writing operations to the engine; this way, // We set active because we are now writing operations to the engine; this way,
// if we go idle after some time and become inactive, we still give sync'd flush a chance to run. // we can flush if we go idle after some time and become inactive.
active.set(true); active.set(true);
} }
// time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
@ -2162,20 +2154,29 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
/** /**
* Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last * Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last
* indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen. * indexing operation, so we can flush the index.
*/ */
public void checkIdle(long inactiveTimeNS) { public void flushOnIdle(long inactiveTimeNS) {
Engine engineOrNull = getEngineOrNull(); Engine engineOrNull = getEngineOrNull();
if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) { if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) {
boolean wasActive = active.getAndSet(false); boolean wasActive = active.getAndSet(false);
if (wasActive) { if (wasActive) {
logger.debug("shard is now inactive"); logger.debug("flushing shard on inactive");
try { threadPool.executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() {
indexEventListener.onShardInactive(this); @Override
} catch (Exception e) { public void onFailure(Exception e) {
logger.warn("failed to notify index event listener", e); if (state != IndexShardState.CLOSED) {
logger.warn("failed to flush shard on inactive", e);
} }
} }
@Override
protected void doRun() {
flush(new FlushRequest().waitIfOngoing(false).force(false));
periodicFlushMetric.inc();
}
});
}
} }
} }

View File

@ -329,7 +329,7 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos
long totalBytesWriting = 0; long totalBytesWriting = 0;
for (IndexShard shard : availableShards()) { for (IndexShard shard : availableShards()) {
// Give shard a chance to transition to inactive so sync'd flush can happen: // Give shard a chance to transition to inactive so we can flush:
checkIdle(shard, inactiveTime.nanos()); checkIdle(shard, inactiveTime.nanos());
// How many bytes this shard is currently (async'd) moving from heap to disk: // How many bytes this shard is currently (async'd) moving from heap to disk:
@ -443,7 +443,7 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos
*/ */
protected void checkIdle(IndexShard shard, long inactiveTimeNS) { protected void checkIdle(IndexShard shard, long inactiveTimeNS) {
try { try {
shard.checkIdle(inactiveTimeNS); shard.flushOnIdle(inactiveTimeNS);
} catch (AlreadyClosedException e) { } catch (AlreadyClosedException e) {
logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e);
} }

View File

@ -72,7 +72,6 @@ import org.opensearch.index.seqno.RetentionLeaseSyncer;
import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.GlobalCheckpointSyncAction;
import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer;
import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.cluster.IndicesClusterStateService;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.mapper.MapperRegistry;
import org.opensearch.indices.store.IndicesStore; import org.opensearch.indices.store.IndicesStore;
import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata;
@ -270,7 +269,6 @@ public class IndicesModule extends AbstractModule {
protected void configure() { protected void configure() {
bind(IndicesStore.class).asEagerSingleton(); bind(IndicesStore.class).asEagerSingleton();
bind(IndicesClusterStateService.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton();
bind(SyncedFlushService.class).asEagerSingleton();
bind(TransportNodesListShardStoreMetadata.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetadata.class).asEagerSingleton();
bind(GlobalCheckpointSyncAction.class).asEagerSingleton(); bind(GlobalCheckpointSyncAction.class).asEagerSingleton();
bind(TransportResyncReplicationAction.class).asEagerSingleton(); bind(TransportResyncReplicationAction.class).asEagerSingleton();

View File

@ -76,7 +76,6 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask;
import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardId;
import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.shard.ShardNotFoundException;
import org.opensearch.indices.IndicesService; import org.opensearch.indices.IndicesService;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoverySourceService;
import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.PeerRecoveryTargetService;
import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryFailedException;
@ -144,7 +143,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
final NodeMappingRefreshAction nodeMappingRefreshAction, final NodeMappingRefreshAction nodeMappingRefreshAction,
final RepositoriesService repositoriesService, final RepositoriesService repositoriesService,
final SearchService searchService, final SearchService searchService,
final SyncedFlushService syncedFlushService,
final PeerRecoverySourceService peerRecoverySourceService, final PeerRecoverySourceService peerRecoverySourceService,
final SnapshotShardsService snapshotShardsService, final SnapshotShardsService snapshotShardsService,
final PrimaryReplicaSyncer primaryReplicaSyncer, final PrimaryReplicaSyncer primaryReplicaSyncer,
@ -161,7 +159,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
nodeMappingRefreshAction, nodeMappingRefreshAction,
repositoriesService, repositoriesService,
searchService, searchService,
syncedFlushService,
peerRecoverySourceService, peerRecoverySourceService,
snapshotShardsService, snapshotShardsService,
primaryReplicaSyncer, primaryReplicaSyncer,
@ -181,7 +178,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
final NodeMappingRefreshAction nodeMappingRefreshAction, final NodeMappingRefreshAction nodeMappingRefreshAction,
final RepositoriesService repositoriesService, final RepositoriesService repositoriesService,
final SearchService searchService, final SearchService searchService,
final SyncedFlushService syncedFlushService,
final PeerRecoverySourceService peerRecoverySourceService, final PeerRecoverySourceService peerRecoverySourceService,
final SnapshotShardsService snapshotShardsService, final SnapshotShardsService snapshotShardsService,
final PrimaryReplicaSyncer primaryReplicaSyncer, final PrimaryReplicaSyncer primaryReplicaSyncer,
@ -189,13 +185,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
final RetentionLeaseSyncer retentionLeaseSyncer final RetentionLeaseSyncer retentionLeaseSyncer
) { ) {
this.settings = settings; this.settings = settings;
this.buildInIndexListener = Arrays.asList( this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, snapshotShardsService);
peerRecoverySourceService,
recoveryTargetService,
searchService,
syncedFlushService,
snapshotShardsService
);
this.indicesService = indicesService; this.indicesService = indicesService;
this.clusterService = clusterService; this.clusterService = clusterService;
this.threadPool = threadPool; this.threadPool = threadPool;

View File

@ -1,179 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.indices.flush;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;
import org.opensearch.index.shard.ShardId;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
/**
* Result for all copies of a shard
*/
public class ShardsSyncedFlushResult implements Writeable {
private String failureReason;
private Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses;
private String syncId;
private ShardId shardId;
// some shards may be unassigned, so we need this as state
private int totalShards;
public ShardsSyncedFlushResult(StreamInput in) throws IOException {
failureReason = in.readOptionalString();
int numResponses = in.readInt();
shardResponses = new HashMap<>();
for (int i = 0; i < numResponses; i++) {
ShardRouting shardRouting = new ShardRouting(in);
SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in);
shardResponses.put(shardRouting, response);
}
syncId = in.readOptionalString();
shardId = new ShardId(in);
totalShards = in.readInt();
}
public ShardId getShardId() {
return shardId;
}
/**
* failure constructor
*/
public ShardsSyncedFlushResult(ShardId shardId, int totalShards, String failureReason) {
this.syncId = null;
this.failureReason = failureReason;
this.shardResponses = emptyMap();
this.shardId = shardId;
this.totalShards = totalShards;
}
/**
* success constructor
*/
public ShardsSyncedFlushResult(
ShardId shardId,
String syncId,
int totalShards,
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses
) {
this.failureReason = null;
this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses));
this.syncId = syncId;
this.totalShards = totalShards;
this.shardId = shardId;
}
/**
* @return true if the operation failed before reaching step three of synced flush. {@link #failureReason()} can be used for
* more details
*/
public boolean failed() {
return failureReason != null;
}
/**
* @return the reason for the failure if synced flush failed before step three of synced flush
*/
public String failureReason() {
return failureReason;
}
public String syncId() {
return syncId;
}
/**
* @return total number of shards for which a sync attempt was made
*/
public int totalShards() {
return totalShards;
}
/**
* @return total number of successful shards
*/
public int successfulShards() {
int i = 0;
for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) {
if (result.success()) {
i++;
}
}
return i;
}
/**
* @return an array of shard failures
*/
public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards() {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failures = new HashMap<>();
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> result : shardResponses.entrySet()) {
if (result.getValue().success() == false) {
failures.put(result.getKey(), result.getValue());
}
}
return failures;
}
/**
* @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush.
* Empty if synced flush failed before step three.
*/
public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses() {
return shardResponses;
}
public ShardId shardId() {
return shardId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(failureReason);
out.writeInt(shardResponses.size());
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> entry : shardResponses.entrySet()) {
entry.getKey().writeTo(out);
entry.getValue().writeTo(out);
}
out.writeOptionalString(syncId);
shardId.writeTo(out);
out.writeInt(totalShards);
}
}
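For reference, the serialization contract the deleted class satisfied is the standard Writeable round-trip; a minimal sketch, where original is any ShardsSyncedFlushResult and BytesStreamOutput is the usual in-memory stream:

    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);                              // fields written in declaration order
    try (StreamInput in = out.bytes().streamInput()) {
        ShardsSyncedFlushResult copy = new ShardsSyncedFlushResult(in); // mirrors writeTo exactly
    }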

View File

@ -1,891 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.indices.flush;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.opensearch.LegacyESVersion;
import org.opensearch.OpenSearchException;
import org.opensearch.action.ActionListener;
import org.opensearch.action.StepListener;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.SyncedFlushResponse;
import org.opensearch.action.support.IndicesOptions;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.routing.IndexShardRoutingTable;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.Nullable;
import org.opensearch.common.Strings;
import org.opensearch.common.UUIDs;
import org.opensearch.common.inject.Inject;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.ConcurrentCollections;
import org.opensearch.common.util.concurrent.CountDown;
import org.opensearch.index.Index;
import org.opensearch.index.IndexNotFoundException;
import org.opensearch.index.IndexService;
import org.opensearch.index.engine.CommitStats;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.shard.IndexEventListener;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.IndexShardState;
import org.opensearch.index.shard.ShardId;
import org.opensearch.index.shard.ShardNotFoundException;
import org.opensearch.indices.IndexClosedException;
import org.opensearch.indices.IndicesService;
import org.opensearch.tasks.Task;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.TransportChannel;
import org.opensearch.transport.TransportException;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestHandler;
import org.opensearch.transport.TransportResponse;
import org.opensearch.transport.TransportResponseHandler;
import org.opensearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
public class SyncedFlushService implements IndexEventListener {
private static final Logger logger = LogManager.getLogger(SyncedFlushService.class);
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(logger.getName());
public static final String SYNCED_FLUSH_DEPRECATION_MESSAGE =
"Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.";
private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre";
private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync";
private static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight";
private final IndicesService indicesService;
private final ClusterService clusterService;
private final TransportService transportService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public SyncedFlushService(
IndicesService indicesService,
ClusterService clusterService,
TransportService transportService,
IndexNameExpressionResolver indexNameExpressionResolver
) {
this.indicesService = indicesService;
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
transportService.registerRequestHandler(
PRE_SYNCED_FLUSH_ACTION_NAME,
ThreadPool.Names.FLUSH,
PreShardSyncedFlushRequest::new,
new PreSyncedFlushTransportHandler()
);
transportService.registerRequestHandler(
SYNCED_FLUSH_ACTION_NAME,
ThreadPool.Names.FLUSH,
ShardSyncedFlushRequest::new,
new SyncedFlushTransportHandler()
);
transportService.registerRequestHandler(
IN_FLIGHT_OPS_ACTION_NAME,
ThreadPool.Names.SAME,
InFlightOpsRequest::new,
new InFlightOpCountTransportHandler()
);
}
@Override
public void onShardInactive(final IndexShard indexShard) {
// A normal flush has the same effect as a synced flush if all nodes are on 7.6 or later.
final boolean preferNormalFlush = clusterService.state().nodes().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_6_0);
if (preferNormalFlush) {
performNormalFlushOnInactive(indexShard);
} else if (indexShard.routingEntry().primary()) {
// we only want to call sync flush once, so only trigger it when we are on a primary
attemptSyncedFlush(indexShard.shardId(), new ActionListener<ShardsSyncedFlushResult>() {
@Override
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
logger.trace(
"{} sync flush on inactive shard returned successfully for sync_id: {}",
syncedFlushResult.getShardId(),
syncedFlushResult.syncId()
);
}
@Override
public void onFailure(Exception e) {
logger.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
}
});
}
}
private void performNormalFlushOnInactive(IndexShard shard) {
logger.debug("flushing shard {} on inactive", shard.routingEntry());
shard.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
if (shard.state() != IndexShardState.CLOSED) {
logger.warn(new ParameterizedMessage("failed to flush shard {} on inactive", shard.routingEntry()), e);
}
}
@Override
protected void doRun() {
shard.flush(new FlushRequest().force(false).waitIfOngoing(false));
}
});
}
/**
* A utility method to perform a synced flush for all shards of multiple indices.
* See {@link #attemptSyncedFlush(ShardId, ActionListener)}
* for more details.
*/
public void attemptSyncedFlush(
final String[] aliasesOrIndices,
IndicesOptions indicesOptions,
final ActionListener<SyncedFlushResponse> listener
) {
final ClusterState state = clusterService.state();
if (state.nodes().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_6_0)) {
DEPRECATION_LOGGER.deprecate("synced_flush", SYNCED_FLUSH_DEPRECATION_MESSAGE);
}
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
int numberOfShards = 0;
for (Index index : concreteIndices) {
final IndexMetadata indexMetadata = state.metadata().getIndexSafe(index);
numberOfShards += indexMetadata.getNumberOfShards();
results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
}
if (numberOfShards == 0) {
listener.onResponse(new SyncedFlushResponse(results));
return;
}
final CountDown countDown = new CountDown(numberOfShards);
for (final Index concreteIndex : concreteIndices) {
final String index = concreteIndex.getName();
final IndexMetadata indexMetadata = state.metadata().getIndexSafe(concreteIndex);
final int indexNumberOfShards = indexMetadata.getNumberOfShards();
for (int shard = 0; shard < indexNumberOfShards; shard++) {
final ShardId shardId = new ShardId(indexMetadata.getIndex(), shard);
innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {
@Override
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
results.get(index).add(syncedFlushResult);
if (countDown.countDown()) {
listener.onResponse(new SyncedFlushResponse(results));
}
}
@Override
public void onFailure(Exception e) {
logger.debug("{} unexpected error while executing synced flush", shardId);
final int totalShards = indexMetadata.getNumberOfReplicas() + 1;
results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
if (countDown.countDown()) {
listener.onResponse(new SyncedFlushResponse(results));
}
}
});
}
}
}
/*
* Tries to flush all copies of a shard and write a sync id to it.
* After a synced flush two shard copies may only contain the same sync id if they contain the same documents.
* To ensure this, synced flush works in three steps:
* 1. Flush all shard copies and gather the commit ids for each copy after the flush
* 2. Ensure that there are no ongoing indexing operations on the primary
* 3. Perform an additional flush on each shard copy that writes the sync id
*
* Step 3 is only executed on a shard if
* a) the shard has no uncommitted changes since the last flush
* b) the last flush was the one executed in 1 (use the collected commit id to verify this)
*
* This alone is not enough to ensure that all copies contain the same documents.
* Without step 2 a sync id would be written for inconsistent copies in the following scenario:
*
* Write operation has completed on a primary and is being sent to replicas. The write request does not reach the
* replicas until sync flush is finished.
* Step 1 is executed. After the flush, the commit point on the primary contains a write operation that the replica does not have.
* Step 3 will be executed on primary and replica as well because there are no uncommitted changes on primary (the first flush
* committed them) and there are no uncommitted changes on the replica (the write operation has not reached the replica yet).
*
* Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary.
* Together with the conditions for step 3 (same commit id and no uncommitted changes) this guarantees that a sync id will only
* be written on a primary if no write operation was executed between step 1 and step 3, and a sync id will only be written on
* the replica if it contains the same changes that the primary contains.
*
* Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies.
**/
public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) {
innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener);
}
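Condensed, the three steps in the comment above map onto the helper calls inside innerAttemptSyncedFlush below; a simplified control-flow sketch with the step listeners elided:

    // step 1: flush every active copy and collect per-node commit ids
    sendPreSyncRequests(activeShards, state, shardId, presyncStep);
    // step 2: count in-flight operations on the primary; abort unless it is zero
    getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsStep);
    // step 3: stamp a fresh sync id onto each copy whose commit id is unchanged
    sendSyncRequests(UUIDs.randomBase64UUID(), activeShards, state,
        presyncResponses, shardId, totalShards, actionListener);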
private void innerAttemptSyncedFlush(
final ShardId shardId,
final ClusterState state,
final ActionListener<ShardsSyncedFlushResult> actionListener
) {
try {
final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
final int totalShards = shardRoutingTable.getSize();
if (activeShards.size() == 0) {
actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards"));
return;
}
// 1. send pre-sync flushes to all replicas
final StepListener<Map<String, PreSyncedFlushResponse>> presyncStep = new StepListener<>();
sendPreSyncRequests(activeShards, state, shardId, presyncStep);
// 2. fetch in flight operations
final StepListener<InFlightOpsResponse> inflightOpsStep = new StepListener<>();
presyncStep.whenComplete(presyncResponses -> {
if (presyncResponses.isEmpty()) {
actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "all shards failed to commit on pre-sync"));
} else {
getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsStep);
}
}, actionListener::onFailure);
// 3. now send the sync request to all the shards
inflightOpsStep.whenComplete(inFlightOpsResponse -> {
final Map<String, PreSyncedFlushResponse> presyncResponses = presyncStep.result();
final int inflight = inFlightOpsResponse.opCount();
assert inflight >= 0;
if (inflight != 0) {
actionListener.onResponse(
new ShardsSyncedFlushResult(shardId, totalShards, "[" + inflight + "] ongoing operations on primary")
);
} else {
final String sharedSyncId = sharedExistingSyncId(presyncResponses);
if (sharedSyncId != null) {
assert presyncResponses.values()
.stream()
.allMatch(r -> r.existingSyncId.equals(sharedSyncId)) : "Not all shards have the same existing sync id ["
+ sharedSyncId
+ "], responses ["
+ presyncResponses
+ "]";
reportSuccessWithExistingSyncId(shardId, sharedSyncId, activeShards, totalShards, presyncResponses, actionListener);
} else {
String syncId = UUIDs.randomBase64UUID();
sendSyncRequests(syncId, activeShards, state, presyncResponses, shardId, totalShards, actionListener);
}
}
}, actionListener::onFailure);
} catch (Exception e) {
actionListener.onFailure(e);
}
}
private String sharedExistingSyncId(Map<String, PreSyncedFlushResponse> preSyncedFlushResponses) {
String existingSyncId = null;
for (PreSyncedFlushResponse resp : preSyncedFlushResponses.values()) {
if (Strings.isNullOrEmpty(resp.existingSyncId)) {
return null;
}
if (existingSyncId == null) {
existingSyncId = resp.existingSyncId;
}
if (existingSyncId.equals(resp.existingSyncId) == false) {
return null;
}
}
return existingSyncId;
}
private void reportSuccessWithExistingSyncId(
ShardId shardId,
String existingSyncId,
List<ShardRouting> shards,
int totalShards,
Map<String, PreSyncedFlushResponse> preSyncResponses,
ActionListener<ShardsSyncedFlushResult> listener
) {
final Map<ShardRouting, ShardSyncedFlushResponse> results = new HashMap<>();
for (final ShardRouting shard : shards) {
if (preSyncResponses.containsKey(shard.currentNodeId())) {
results.put(shard, new ShardSyncedFlushResponse((String) null));
}
}
listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results));
}
final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) {
final IndexMetadata indexMetadata = state.getMetadata().index(shardId.getIndex());
if (indexMetadata == null) {
throw new IndexNotFoundException(shardId.getIndexName());
} else if (indexMetadata.getState() == IndexMetadata.State.CLOSE) {
throw new IndexClosedException(shardId.getIndex());
}
final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetadata.getIndex()).shard(shardId.id());
if (shardRoutingTable == null) {
throw new ShardNotFoundException(shardId);
}
return shardRoutingTable;
}
/**
* Returns the number of in-flight operations on the primary; -1 upon error.
*/
protected void getInflightOpsCount(
final ShardId shardId,
ClusterState state,
IndexShardRoutingTable shardRoutingTable,
final ActionListener<InFlightOpsResponse> listener
) {
try {
final ShardRouting primaryShard = shardRoutingTable.primaryShard();
final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId());
if (primaryNode == null) {
logger.trace("{} failed to resolve node for primary shard {}, skipping sync", shardId, primaryShard);
listener.onResponse(new InFlightOpsResponse(-1));
return;
}
logger.trace("{} retrieving in flight operation count", shardId);
transportService.sendRequest(
primaryNode,
IN_FLIGHT_OPS_ACTION_NAME,
new InFlightOpsRequest(shardId),
new TransportResponseHandler<InFlightOpsResponse>() {
@Override
public InFlightOpsResponse read(StreamInput in) throws IOException {
return new InFlightOpsResponse(in);
}
@Override
public void handleResponse(InFlightOpsResponse response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
logger.debug("{} unexpected error while retrieving in flight op count", shardId);
listener.onFailure(exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
);
} catch (Exception e) {
listener.onFailure(e);
}
}
private int numDocsOnPrimary(List<ShardRouting> shards, Map<String, PreSyncedFlushResponse> preSyncResponses) {
for (ShardRouting shard : shards) {
if (shard.primary()) {
final PreSyncedFlushResponse resp = preSyncResponses.get(shard.currentNodeId());
if (resp != null) {
return resp.numDocs;
}
}
}
return PreSyncedFlushResponse.UNKNOWN_NUM_DOCS;
}
void sendSyncRequests(
final String syncId,
final List<ShardRouting> shards,
ClusterState state,
Map<String, PreSyncedFlushResponse> preSyncResponses,
final ShardId shardId,
final int totalShards,
final ActionListener<ShardsSyncedFlushResult> listener
) {
final CountDown countDown = new CountDown(shards.size());
final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
final int numDocsOnPrimary = numDocsOnPrimary(shards, preSyncResponses);
for (final ShardRouting shard : shards) {
final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
if (node == null) {
logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
results.put(shard, new ShardSyncedFlushResponse("unknown node"));
countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue;
}
final PreSyncedFlushResponse preSyncedResponse = preSyncResponses.get(shard.currentNodeId());
if (preSyncedResponse == null) {
logger.trace(
"{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}",
shardId,
syncId,
shard
);
results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue;
}
if (preSyncedResponse.numDocs != numDocsOnPrimary
&& preSyncedResponse.numDocs != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS
&& numDocsOnPrimary != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS) {
logger.debug(
"{} can't issue sync id [{}] for replica [{}] with num docs [{}]; num docs on primary [{}]",
shardId,
syncId,
shard,
preSyncedResponse.numDocs,
numDocsOnPrimary
);
results.put(
shard,
new ShardSyncedFlushResponse(
"ongoing indexing operations: "
+ "num docs on replica ["
+ preSyncedResponse.numDocs
+ "]; num docs on primary ["
+ numDocsOnPrimary
+ "]"
)
);
countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue;
}
logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
ShardSyncedFlushRequest syncedFlushRequest = new ShardSyncedFlushRequest(shard.shardId(), syncId, preSyncedResponse.commitId);
transportService.sendRequest(
node,
SYNCED_FLUSH_ACTION_NAME,
syncedFlushRequest,
new TransportResponseHandler<ShardSyncedFlushResponse>() {
@Override
public ShardSyncedFlushResponse read(StreamInput in) throws IOException {
return new ShardSyncedFlushResponse(in);
}
@Override
public void handleResponse(ShardSyncedFlushResponse response) {
ShardSyncedFlushResponse existing = results.put(shard, response);
assert existing == null : "got two answers for node [" + node + "]";
// count after the assert so we won't decrement twice in handleException
countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
}
@Override
public void handleException(TransportException exp) {
logger.trace(
() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard),
exp
);
results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
);
}
}
private void countDownAndSendResponseIfDone(
String syncId,
List<ShardRouting> shards,
ShardId shardId,
int totalShards,
ActionListener<ShardsSyncedFlushResult> listener,
CountDown countDown,
Map<ShardRouting, ShardSyncedFlushResponse> results
) {
if (countDown.countDown()) {
assert results.size() == shards.size();
listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results));
}
}
/**
* Sends pre-sync requests to all started copies of the given shard.
*/
void sendPreSyncRequests(
final List<ShardRouting> shards,
final ClusterState state,
final ShardId shardId,
final ActionListener<Map<String, PreSyncedFlushResponse>> listener
) {
final CountDown countDown = new CountDown(shards.size());
final ConcurrentMap<String, PreSyncedFlushResponse> presyncResponses = ConcurrentCollections.newConcurrentMap();
for (final ShardRouting shard : shards) {
logger.trace("{} sending pre-synced flush request to {}", shardId, shard);
final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
if (node == null) {
logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard);
if (countDown.countDown()) {
listener.onResponse(presyncResponses);
}
continue;
}
transportService.sendRequest(
node,
PRE_SYNCED_FLUSH_ACTION_NAME,
new PreShardSyncedFlushRequest(shard.shardId()),
new TransportResponseHandler<PreSyncedFlushResponse>() {
@Override
public PreSyncedFlushResponse read(StreamInput in) throws IOException {
return new PreSyncedFlushResponse(in);
}
@Override
public void handleResponse(PreSyncedFlushResponse response) {
PreSyncedFlushResponse existing = presyncResponses.putIfAbsent(node.getId(), response);
assert existing == null : "got two answers for node [" + node + "]";
// count after the assert so we won't decrement twice in handleException
if (countDown.countDown()) {
listener.onResponse(presyncResponses);
}
}
@Override
public void handleException(TransportException exp) {
logger.trace(
() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard),
exp
);
if (countDown.countDown()) {
listener.onResponse(presyncResponses);
}
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
}
);
}
}
private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
logger.trace("{} performing pre sync flush", request.shardId());
indexShard.flush(flushRequest);
final CommitStats commitStats = indexShard.commitStats();
final Engine.CommitId commitId = commitStats.getRawCommitId();
logger.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs());
return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId());
}
private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id());
logger.trace(
"{} performing sync flush. sync id [{}], expected commit id {}",
request.shardId(),
request.syncId(),
request.expectedCommitId()
);
Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
switch (result) {
case SUCCESS:
return new ShardSyncedFlushResponse((String) null);
case COMMIT_MISMATCH:
return new ShardSyncedFlushResponse("commit has changed");
case PENDING_OPERATIONS:
return new ShardSyncedFlushResponse("pending operations");
default:
throw new OpenSearchException("unknown synced flush result [" + result + "]");
}
}
private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id());
if (indexShard.routingEntry().primary() == false) {
throw new IllegalStateException("[" + request.shardId() + "] expected a primary shard");
}
int opCount = indexShard.getActiveOperationsCount();
return new InFlightOpsResponse(opCount == IndexShard.OPERATIONS_BLOCKED ? 0 : opCount);
}
public static final class PreShardSyncedFlushRequest extends TransportRequest {
private ShardId shardId;
public PreShardSyncedFlushRequest(StreamInput in) throws IOException {
super(in);
this.shardId = new ShardId(in);
}
public PreShardSyncedFlushRequest(ShardId shardId) {
this.shardId = shardId;
}
@Override
public String toString() {
return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}';
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
}
public ShardId shardId() {
return shardId;
}
}
/**
* Response for first step of synced flush (flush) for one shard copy
*/
static final class PreSyncedFlushResponse extends TransportResponse {
static final int UNKNOWN_NUM_DOCS = -1;
Engine.CommitId commitId;
int numDocs;
@Nullable
String existingSyncId = null;
PreSyncedFlushResponse(StreamInput in) throws IOException {
super(in);
commitId = new Engine.CommitId(in);
numDocs = in.readInt();
existingSyncId = in.readOptionalString();
}
PreSyncedFlushResponse(Engine.CommitId commitId, int numDocs, String existingSyncId) {
this.commitId = commitId;
this.numDocs = numDocs;
this.existingSyncId = existingSyncId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
commitId.writeTo(out);
out.writeInt(numDocs);
out.writeOptionalString(existingSyncId);
}
}
public static final class ShardSyncedFlushRequest extends TransportRequest {
private String syncId;
private Engine.CommitId expectedCommitId;
private ShardId shardId;
public ShardSyncedFlushRequest(StreamInput in) throws IOException {
super(in);
shardId = new ShardId(in);
expectedCommitId = new Engine.CommitId(in);
syncId = in.readString();
}
public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) {
this.expectedCommitId = expectedCommitId;
this.shardId = shardId;
this.syncId = syncId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
expectedCommitId.writeTo(out);
out.writeString(syncId);
}
public ShardId shardId() {
return shardId;
}
public String syncId() {
return syncId;
}
public Engine.CommitId expectedCommitId() {
return expectedCommitId;
}
@Override
public String toString() {
return "ShardSyncedFlushRequest{" + "shardId=" + shardId + ",syncId='" + syncId + '\'' + '}';
}
}
/**
* Response for third step of synced flush (writing the sync id) for one shard copy
*/
public static final class ShardSyncedFlushResponse extends TransportResponse {
/**
* A non-null value indicates a failure to sync flush; null means success.
*/
String failureReason;
public ShardSyncedFlushResponse(StreamInput in) throws IOException {
super(in);
failureReason = in.readOptionalString();
}
public ShardSyncedFlushResponse(String failureReason) {
this.failureReason = failureReason;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(failureReason);
}
public boolean success() {
return failureReason == null;
}
public String failureReason() {
return failureReason;
}
@Override
public String toString() {
return "ShardSyncedFlushResponse{" + "success=" + success() + ", failureReason='" + failureReason + '\'' + '}';
}
public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException {
return new ShardSyncedFlushResponse(in);
}
}
public static final class InFlightOpsRequest extends TransportRequest {
private ShardId shardId;
public InFlightOpsRequest(StreamInput in) throws IOException {
super(in);
shardId = new ShardId(in);
}
public InFlightOpsRequest(ShardId shardId) {
this.shardId = shardId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
}
public ShardId shardId() {
return shardId;
}
@Override
public String toString() {
return "InFlightOpsRequest{" + "shardId=" + shardId + '}';
}
}
/**
* Response for second step of synced flush (check operations in flight)
*/
static final class InFlightOpsResponse extends TransportResponse {
int opCount;
InFlightOpsResponse(StreamInput in) throws IOException {
super(in);
opCount = in.readVInt();
}
InFlightOpsResponse(int opCount) {
assert opCount >= 0 : opCount;
this.opCount = opCount;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(opCount);
}
public int opCount() {
return opCount;
}
@Override
public String toString() {
return "InFlightOpsResponse{" + "opCount=" + opCount + '}';
}
}
private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> {
@Override
public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception {
channel.sendResponse(performPreSyncedFlush(request));
}
}
private final class SyncedFlushTransportHandler implements TransportRequestHandler<ShardSyncedFlushRequest> {
@Override
public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception {
channel.sendResponse(performSyncedFlush(request));
}
}
private final class InFlightOpCountTransportHandler implements TransportRequestHandler<InFlightOpsRequest> {
@Override
public void messageReceived(InFlightOpsRequest request, TransportChannel channel, Task task) throws Exception {
channel.sendResponse(performInFlightOps(request));
}
}
}
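Taken together, the classes above implement a three-step, per-copy protocol: PreShardSyncedFlushRequest flushes each copy and reports its commit id, InFlightOpsRequest checks the primary for in-flight operations, and ShardSyncedFlushRequest writes the sync id only where the commit is unchanged. A minimal, self-contained sketch of that per-copy decision (illustrative names, not the OpenSearch API; the failure strings mirror the ones asserted in the removed tests later in this commit):

    import java.util.HashMap;
    import java.util.Map;

    final class SyncedFlushDecisionSketch {

        static final class CopyState {
            final String commitId;  // commit id observed for this copy
            final int inFlightOps;  // operations in flight when checked

            CopyState(String commitId, int inFlightOps) {
                this.commitId = commitId;
                this.inFlightOps = inFlightOps;
            }
        }

        /** Maps each copy to a failure reason, or to null when the sync id may be written. */
        static Map<String, String> decide(Map<String, CopyState> preSync, Map<String, CopyState> current) {
            Map<String, String> results = new HashMap<>();
            for (Map.Entry<String, CopyState> e : preSync.entrySet()) {
                CopyState before = e.getValue();
                CopyState now = current.get(e.getKey());
                if (before == null) {
                    results.put(e.getKey(), "no commit id from pre-sync flush"); // step 1 produced nothing
                } else if (now != null && now.inFlightOps > 0) {
                    results.put(e.getKey(), "pending operations");               // step 2 veto
                } else if (now == null || !now.commitId.equals(before.commitId)) {
                    results.put(e.getKey(), "commit has changed");               // step 3 compare failed
                } else {
                    results.put(e.getKey(), null);                               // success: write the sync id
                }
            }
            return results;
        }
    }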

View File

@@ -32,17 +32,20 @@
package org.opensearch.rest.action.admin.indices;
-import org.opensearch.action.admin.indices.flush.SyncedFlushRequest;
-import org.opensearch.action.admin.indices.flush.SyncedFlushResponse;
+import org.opensearch.action.admin.indices.flush.FlushRequest;
+import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.action.support.IndicesOptions;
import org.opensearch.client.node.NodeClient;
import org.opensearch.common.Strings;
+import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.rest.BaseRestHandler;
import org.opensearch.rest.BytesRestResponse;
+import org.opensearch.rest.RestChannel;
import org.opensearch.rest.RestRequest;
import org.opensearch.rest.RestResponse;
-import org.opensearch.rest.action.RestBuilderListener;
+import org.opensearch.rest.RestStatus;
+import org.opensearch.rest.action.RestToXContentListener;
import java.io.IOException;
import java.util.List;
@@ -54,6 +57,8 @@ import static org.opensearch.rest.RestRequest.Method.POST;
public class RestSyncedFlushAction extends BaseRestHandler {
+    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestSyncedFlushAction.class);
    @Override
    public List<Route> routes() {
        return unmodifiableList(
@@ -73,17 +78,37 @@ public class RestSyncedFlushAction extends BaseRestHandler {
    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-        IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen());
-        SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
-        syncedFlushRequest.indicesOptions(indicesOptions);
-        return channel -> client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener<SyncedFlushResponse>(channel) {
-            @Override
-            public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception {
-                builder.startObject();
-                results.toXContent(builder, request);
-                builder.endObject();
-                return new BytesRestResponse(results.restStatus(), builder);
-            }
-        });
+        DEPRECATION_LOGGER.deprecate(
+            "synced_flush",
+            "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version."
+        );
+        final FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
+        flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions()));
+        return channel -> client.admin().indices().flush(flushRequest, new SimulateSyncedFlushResponseListener(channel));
    }
+    static final class SimulateSyncedFlushResponseListener extends RestToXContentListener<FlushResponse> {
+        SimulateSyncedFlushResponseListener(RestChannel channel) {
+            super(channel);
+        }
+        @Override
+        public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception {
+            builder.startObject();
+            buildSyncedFlushResponse(builder, flushResponse);
+            builder.endObject();
+            final RestStatus restStatus = flushResponse.getFailedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT;
+            return new BytesRestResponse(restStatus, builder);
+        }
+        private void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException {
+            builder.startObject("_shards");
+            builder.field("total", flushResponse.getTotalShards());
+            builder.field("successful", flushResponse.getSuccessfulShards());
+            builder.field("failed", flushResponse.getFailedShards());
+            // can't serialize the detail of each index as we don't have the shard count per index.
+            builder.endObject();
+        }
    }
}
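Behaviorally, POST /<index>/_flush/synced now logs a deprecation warning, runs an ordinary flush, and reports only the aggregate _shards section, e.g. {"_shards":{"total":2,"successful":2,"failed":0}} (illustrative counts), with HTTP 200 on full success and 409 CONFLICT when any shard failed; the per-index breakdown of the old synced-flush response is gone, as the comment in buildSyncedFlushResponse notes.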

View File

@@ -53,7 +53,6 @@ import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.settings.Settings;
-import org.opensearch.index.engine.Engine;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.ReplicationGroup;
@@ -78,6 +77,7 @@ import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import static org.mockito.Mockito.doNothing;
import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state;
import static org.opensearch.test.ClusterServiceUtils.createClusterService;
import static org.opensearch.test.ClusterServiceUtils.setState;
@@ -194,8 +194,7 @@ public class TransportVerifyShardBeforeCloseActionTests extends OpenSearchTestCase {
    public void testShardIsFlushed() throws Throwable {
        final ArgumentCaptor<FlushRequest> flushRequest = ArgumentCaptor.forClass(FlushRequest.class);
-        when(indexShard.flush(flushRequest.capture())).thenReturn(new Engine.CommitId(new byte[0]));
+        doNothing().when(indexShard).flush(flushRequest.capture());
        executeOnPrimaryOrReplica();
        verify(indexShard, times(1)).flush(any(FlushRequest.class));
        assertThat(flushRequest.getValue().force(), is(true));
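Since flush(FlushRequest) now returns void instead of an Engine.CommitId, the stubbing above has to move from when(...).thenReturn(...) to Mockito's void-method form. A minimal sketch of the pattern, with indexShard standing in for any mocked IndexShard:

    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    // void methods cannot be stubbed with when(...); use doNothing().when(mock) instead
    doNothing().when(indexShard).flush(any(FlushRequest.class));
    indexShard.flush(new FlushRequest());
    verify(indexShard, times(1)).flush(any(FlushRequest.class));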

View File

@@ -1,208 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.action.admin.indices.flush;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntMap;
import org.opensearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.TestShardRouting;
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.index.shard.ShardId;
import org.opensearch.indices.flush.ShardsSyncedFlushResult;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.rest.RestStatus;
import org.opensearch.test.OpenSearchTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.opensearch.test.XContentTestUtils.convertToMap;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
public class SyncedFlushUnitTests extends OpenSearchTestCase {
private static class TestPlan {
public SyncedFlushResponse.ShardCounts totalCounts;
public Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
public ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
public SyncedFlushResponse result;
}
public void testIndicesSyncedFlushResult() throws IOException {
final TestPlan testPlan = createTestPlan();
assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total));
assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful));
assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed));
assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
Map<String, Object> asMap = convertToMap(testPlan.result);
assertShardCount("_shards header", (Map<String, Object>) asMap.get("_shards"), testPlan.totalCounts);
assertThat("unexpected number of indices", asMap.size(), equalTo(1 + testPlan.countsPerIndex.size())); // +1 for the shards header
for (String index : testPlan.countsPerIndex.keySet()) {
Map<String, Object> indexMap = (Map<String, Object>) asMap.get(index);
assertShardCount(index, indexMap, testPlan.countsPerIndex.get(index));
List<Map<String, Object>> failureList = (List<Map<String, Object>>) indexMap.get("failures");
final int expectedFailures = testPlan.expectedFailuresPerIndex.get(index);
if (expectedFailures == 0) {
assertNull(index + " has unexpected failures", failureList);
} else {
assertNotNull(index + " should have failures", failureList);
assertThat(failureList, hasSize(expectedFailures));
}
}
}
public void testResponseStreaming() throws IOException {
final TestPlan testPlan = createTestPlan();
assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total));
assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful));
assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed));
assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
BytesStreamOutput out = new BytesStreamOutput();
testPlan.result.writeTo(out);
StreamInput in = out.bytes().streamInput();
SyncedFlushResponse readResponse = new SyncedFlushResponse(in);
assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total));
assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful));
assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed));
assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
assertThat(readResponse.getShardsResultPerIndex().size(), equalTo(testPlan.result.getShardsResultPerIndex().size()));
for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : readResponse.getShardsResultPerIndex().entrySet()) {
List<ShardsSyncedFlushResult> originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey());
assertNotNull(originalShardsResults);
List<ShardsSyncedFlushResult> readShardsResults = entry.getValue();
assertThat(readShardsResults.size(), equalTo(originalShardsResults.size()));
for (int i = 0; i < readShardsResults.size(); i++) {
ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i);
ShardsSyncedFlushResult readShardResult = readShardsResults.get(i);
assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason()));
assertThat(originalShardResult.failed(), equalTo(readShardResult.failed()));
assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId()));
assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards()));
assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId()));
assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards()));
assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size()));
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.failedShards()
.entrySet()) {
SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey());
assertNotNull(readShardResponse);
SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
}
assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size()));
for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.shardResponses()
.entrySet()) {
SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses()
.get(shardEntry.getKey());
assertNotNull(readShardResponse);
SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
}
}
}
}
private void assertShardCount(String name, Map<String, Object> header, ShardCounts expectedCounts) {
assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total));
assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful));
assertThat(name + " has unexpected failed count", (Integer) header.get("failed"), equalTo(expectedCounts.failed));
}
protected TestPlan createTestPlan() {
final TestPlan testPlan = new TestPlan();
final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
final int indexCount = randomIntBetween(1, 10);
int totalShards = 0;
int totalSuccessful = 0;
int totalFailed = 0;
for (int i = 0; i < indexCount; i++) {
final String index = "index_" + i;
int shards = randomIntBetween(1, 4);
int replicas = randomIntBetween(0, 2);
int successful = 0;
int failed = 0;
int failures = 0;
List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
for (int shard = 0; shard < shards; shard++) {
final ShardId shardId = new ShardId(index, "_na_", shard);
if (randomInt(5) < 2) {
// total shard failure
failed += replicas + 1;
failures++;
shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
} else {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
for (int copy = 0; copy < replicas + 1; copy++) {
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
index,
shard,
"node_" + shardId + "_" + copy,
null,
copy == 0,
ShardRoutingState.STARTED
);
if (randomInt(5) < 2) {
// shard copy failure
failed++;
failures++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
} else {
successful++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null));
}
}
shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
}
}
indicesResults.put(index, shardsResults);
testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
testPlan.expectedFailuresPerIndex.put(index, failures);
totalFailed += failed;
totalShards += shards * (replicas + 1);
totalSuccessful += successful;
}
testPlan.result = new SyncedFlushResponse(indicesResults);
testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed);
return testPlan;
}
}

View File

@@ -82,6 +82,7 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.SetOnce;
import org.opensearch.OpenSearchException;
import org.opensearch.Version;
import org.opensearch.action.ActionListener;
@@ -115,6 +116,7 @@ import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.ConcurrentCollections;
+import org.opensearch.common.util.concurrent.ReleasableLock;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.index.IndexSettings;
@@ -165,7 +167,6 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Base64;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
@@ -548,10 +549,9 @@ public class InternalEngineTests extends EngineTestCase {
                : randomIntBetween(0, (int) localCheckpoint.get())
        );
-        final Engine.CommitId commitId = engine.flush(true, true);
+        engine.flush(true, true);
        CommitStats stats2 = engine.commitStats();
-        assertThat(stats2.getRawCommitId(), equalTo(commitId));
        assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration()));
        assertThat(stats2.getId(), notNullValue());
        assertThat(stats2.getId(), not(equalTo(stats1.getId())));
@@ -660,9 +660,9 @@ public class InternalEngineTests extends EngineTestCase {
        recoveringEngine = new InternalEngine(initialEngine.config()) {
            @Override
-            protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
+            protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException {
                committed.set(true);
-                super.commitIndexWriter(writer, translog, syncId);
+                super.commitIndexWriter(writer, translog);
            }
        };
        assertThat(getTranslog(recoveringEngine).stats().getUncommittedOperations(), equalTo(docs));
@@ -1116,137 +1116,31 @@ public class InternalEngineTests extends EngineTestCase {
        checker.run();
    }
public void testSyncedFlush() throws IOException {
try (
Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null)
) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null);
engine.index(indexForDoc(doc));
Engine.CommitId commitID = engine.flush();
assertThat(commitID, equalTo(new Engine.CommitId(store.readLastCommittedSegmentsInfo().getId())));
byte[] wrongBytes = Base64.getDecoder().decode(commitID.toString());
wrongBytes[0] = (byte) ~wrongBytes[0];
Engine.CommitId wrongId = new Engine.CommitId(wrongBytes);
assertEquals(
"should fail to sync flush with wrong id (but no docs)",
engine.syncFlush(syncId + "1", wrongId),
Engine.SyncedFlushResult.COMMIT_MISMATCH
);
engine.index(indexForDoc(doc));
assertEquals(
"should fail to sync flush with right id but pending doc",
engine.syncFlush(syncId + "2", commitID),
Engine.SyncedFlushResult.PENDING_OPERATIONS
);
commitID = engine.flush();
assertEquals(
"should succeed to flush commit with right id and no pending doc",
engine.syncFlush(syncId, commitID),
Engine.SyncedFlushResult.SUCCESS
);
assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
}
}
public void testRenewSyncFlush() throws Exception {
final int iters = randomIntBetween(2, 5); // run this a couple of times to get some coverage
for (int i = 0; i < iters; i++) {
try (
Store store = createStore();
InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null))
) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null));
engine.index(doc1);
assertEquals(engine.getLastWriteNanos(), doc1.startTime());
engine.flush();
Engine.Index doc2 = indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null));
engine.index(doc2);
assertEquals(engine.getLastWriteNanos(), doc2.startTime());
engine.flush();
final boolean forceMergeFlushes = randomBoolean();
final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null);
if (forceMergeFlushes) {
engine.index(
new Engine.Index(
newUid(parsedDoc3),
parsedDoc3,
UNASSIGNED_SEQ_NO,
0,
Versions.MATCH_ANY,
VersionType.INTERNAL,
Engine.Operation.Origin.PRIMARY,
System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(),
-1,
false,
UNASSIGNED_SEQ_NO,
0
)
);
} else {
engine.index(indexForDoc(parsedDoc3));
}
Engine.CommitId commitID = engine.flush();
assertEquals(
"should succeed to flush commit with right id and no pending doc",
engine.syncFlush(syncId, commitID),
Engine.SyncedFlushResult.SUCCESS
);
assertEquals(3, engine.segments(false).size());
engine.forceMerge(forceMergeFlushes, 1, false, false, false, UUIDs.randomBase64UUID());
if (forceMergeFlushes == false) {
engine.refresh("make all segments visible");
assertEquals(4, engine.segments(false).size());
assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertTrue(engine.tryRenewSyncCommit());
assertEquals(1, engine.segments(false).size());
} else {
engine.refresh("test");
assertBusy(() -> assertEquals(1, engine.segments(false).size()));
}
assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
if (randomBoolean()) {
Engine.Index doc4 = indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null));
engine.index(doc4);
assertEquals(engine.getLastWriteNanos(), doc4.startTime());
} else {
Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid(), primaryTerm.get());
engine.delete(delete);
assertEquals(engine.getLastWriteNanos(), delete.startTime());
}
assertFalse(engine.tryRenewSyncCommit());
// we might hit a concurrent flush from a finishing merge here - just wait if ongoing...
engine.flush(false, true);
assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID));
assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
}
}
}
    public void testSyncedFlushSurvivesEngineRestart() throws IOException {
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
        IOUtils.close(store, engine);
+        SetOnce<IndexWriter> indexWriterHolder = new SetOnce<>();
+        IndexWriterFactory indexWriterFactory = (directory, iwc) -> {
+            indexWriterHolder.set(new IndexWriter(directory, iwc));
+            return indexWriterHolder.get();
+        };
        store = createStore();
-        engine = createEngine(store, primaryTranslogDir, globalCheckpoint::get);
+        engine = createEngine(
+            defaultSettings,
+            store,
+            primaryTranslogDir,
+            newMergePolicy(),
+            indexWriterFactory,
+            null,
+            globalCheckpoint::get
+        );
        final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
        ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
        engine.index(indexForDoc(doc));
        globalCheckpoint.set(0L);
-        final Engine.CommitId commitID = engine.flush();
-        assertEquals(
-            "should succeed to flush commit with right id and no pending doc",
-            engine.syncFlush(syncId, commitID),
-            Engine.SyncedFlushResult.SUCCESS
-        );
+        engine.flush();
+        syncFlush(indexWriterHolder.get(), engine, syncId);
        assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
-        assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
        EngineConfig config = engine.config();
        if (randomBoolean()) {
            engine.close();
@@ -1268,17 +1162,30 @@
        }
    }
    public void testSyncedFlushVanishesOnReplay() throws IOException {
+        IOUtils.close(store, engine);
+        SetOnce<IndexWriter> indexWriterHolder = new SetOnce<>();
+        IndexWriterFactory indexWriterFactory = (directory, iwc) -> {
+            indexWriterHolder.set(new IndexWriter(directory, iwc));
+            return indexWriterHolder.get();
+        };
+        store = createStore();
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+        engine = createEngine(
+            defaultSettings,
+            store,
+            primaryTranslogDir,
+            newMergePolicy(),
+            indexWriterFactory,
+            null,
+            globalCheckpoint::get
+        );
        final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
        ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
+        globalCheckpoint.set(engine.getProcessedLocalCheckpoint());
        engine.index(indexForDoc(doc));
-        final Engine.CommitId commitID = engine.flush();
-        assertEquals(
-            "should succeed to flush commit with right id and no pending doc",
-            engine.syncFlush(syncId, commitID),
-            Engine.SyncedFlushResult.SUCCESS
-        );
+        engine.flush();
+        syncFlush(indexWriterHolder.get(), engine, syncId);
        assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
-        assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
        doc = testParsedDocument("2", null, testDocumentWithTextField(), new BytesArray("{}"), null);
        engine.index(indexForDoc(doc));
        EngineConfig config = engine.config();
@@ -1291,6 +1198,16 @@
        );
    }
void syncFlush(IndexWriter writer, InternalEngine engine, String syncId) throws IOException {
try (ReleasableLock ignored = engine.writeLock.acquire()) {
Map<String, String> userData = new HashMap<>();
writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue()));
userData.put(Engine.SYNC_COMMIT_ID, syncId);
writer.setLiveCommitData(userData.entrySet());
writer.commit();
}
}
    public void testVersioningNewCreate() throws IOException {
        ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
        Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
@@ -3249,8 +3166,8 @@
        ) {
            @Override
-            protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
-                super.commitIndexWriter(writer, translog, syncId);
+            protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException {
+                super.commitIndexWriter(writer, translog);
                if (throwErrorOnCommit.get()) {
                    throw new RuntimeException("power's out");
                }
@@ -5588,14 +5505,14 @@
        final AtomicLong lastSyncedGlobalCheckpointBeforeCommit = new AtomicLong(Translog.readGlobalCheckpoint(translogPath, translogUUID));
        try (InternalEngine engine = new InternalEngine(engineConfig) {
            @Override
-            protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
+            protected void commitIndexWriter(IndexWriter writer, Translog translog) throws IOException {
                lastSyncedGlobalCheckpointBeforeCommit.set(Translog.readGlobalCheckpoint(translogPath, translogUUID));
                // Advance the global checkpoint during the flush to create a lag between a persisted global checkpoint in the translog
                // (this value is visible to the deletion policy) and an in memory global checkpoint in the SequenceNumbersService.
                if (rarely()) {
                    globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), getPersistedLocalCheckpoint()));
                }
-                super.commitIndexWriter(writer, translog, syncId);
+                super.commitIndexWriter(writer, translog);
            }
        }) {
            engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
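The new syncFlush test helper above stamps the sync id directly into the Lucene commit user data, which is also what the assertions read back through the store. Outside the engine the same check is a plain Lucene call; a small sketch (assuming a Directory handle for the shard's index, and that Engine.SYNC_COMMIT_ID is the literal "sync_id"):

    import java.io.IOException;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.store.Directory;

    final class SyncIdReader {
        /** Returns the sync id recorded in the latest commit point, or null if none was written. */
        static String readSyncId(Directory directory) throws IOException {
            return SegmentInfos.readLatestCommit(directory).getUserData().get("sync_id");
        }
    }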

View File

@@ -150,50 +150,6 @@ public class ReadOnlyEngineTests extends EngineTestCase {
        }
    }
public void testFlushes() throws IOException {
IOUtils.close(engine, store);
Engine readOnlyEngine = null;
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
try (Store store = createStore()) {
EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
int numDocs = scaledRandomIntBetween(10, 1000);
try (InternalEngine engine = createEngine(config)) {
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
engine.index(
new Engine.Index(
newUid(doc),
doc,
i,
primaryTerm.get(),
1,
null,
Engine.Operation.Origin.REPLICA,
System.nanoTime(),
-1,
false,
SequenceNumbers.UNASSIGNED_SEQ_NO,
0
)
);
if (rarely()) {
engine.flush();
}
engine.syncTranslog(); // advance persisted local checkpoint
globalCheckpoint.set(engine.getPersistedLocalCheckpoint());
}
globalCheckpoint.set(engine.getPersistedLocalCheckpoint());
engine.syncTranslog();
engine.flushAndClose();
readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, null, null, true, Function.identity(), true);
Engine.CommitId flush = readOnlyEngine.flush(randomBoolean(), true);
assertEquals(flush, readOnlyEngine.flush(randomBoolean(), true));
} finally {
IOUtils.close(readOnlyEngine);
}
}
}
    public void testEnsureMaxSeqNoIsEqualToGlobalCheckpoint() throws IOException {
        IOUtils.close(engine, store);
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
@@ -263,7 +219,6 @@
            expectThrows(expectedException, () -> readOnlyEngine.index(null));
            expectThrows(expectedException, () -> readOnlyEngine.delete(null));
            expectThrows(expectedException, () -> readOnlyEngine.noOp(null));
-            expectThrows(UnsupportedOperationException.class, () -> readOnlyEngine.syncFlush(null, null));
        }
    }
}
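The removed testFlushes relied on flush returning a stable Engine.CommitId to prove a ReadOnlyEngine never creates a new commit. With flush now returning void, the equivalent check goes through CommitStats instead; roughly, as a fragment from a hypothetical read-only engine test:

    // flushing a read-only engine must not advance the commit generation
    CommitStats before = readOnlyEngine.commitStats();
    readOnlyEngine.flush(true, true); // force = true, waitIfOngoing = true
    assertThat(readOnlyEngine.commitStats().getGeneration(), equalTo(before.getGeneration()));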

View File

@@ -103,7 +103,6 @@ import org.opensearch.index.engine.EngineTestCase;
import org.opensearch.index.engine.InternalEngine;
import org.opensearch.index.engine.InternalEngineFactory;
import org.opensearch.index.engine.ReadOnlyEngine;
-import org.opensearch.index.engine.Segment;
import org.opensearch.index.engine.SegmentsStats;
import org.opensearch.index.fielddata.FieldDataStats;
import org.opensearch.index.fielddata.IndexFieldData;
@@ -3871,7 +3870,7 @@
        indexDoc(primary, "_doc", "2", "{\"foo\" : \"bar\"}");
        assertFalse(primary.scheduledRefresh());
        assertTrue(primary.isSearchIdle());
-        primary.checkIdle(0);
+        primary.flushOnIdle(0);
        assertTrue(primary.scheduledRefresh()); // make sure we refresh once the shard is inactive
        try (Engine.Searcher searcher = primary.acquireSearcher("test")) {
            assertEquals(3, searcher.getIndexReader().numDocs());
@@ -4083,92 +4082,6 @@
        assertThat(breaker.getUsed(), equalTo(0L));
    }
public void testFlushOnInactive() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.build();
IndexMetadata metadata = IndexMetadata.builder("test")
.putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1)
.build();
ShardRouting shardRouting = TestShardRouting.newShardRouting(
new ShardId(metadata.getIndex(), 0),
"n1",
true,
ShardRoutingState.INITIALIZING,
RecoverySource.EmptyStoreRecoverySource.INSTANCE
);
final ShardId shardId = shardRouting.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
AtomicBoolean markedInactive = new AtomicBoolean();
AtomicReference<IndexShard> primaryRef = new AtomicReference<>();
IndexShard primary = newShard(
shardRouting,
shardPath,
metadata,
null,
null,
new InternalEngineFactory(),
new EngineConfigFactory(new IndexSettings(metadata, settings)),
() -> {},
RetentionLeaseSyncer.EMPTY,
new IndexEventListener() {
@Override
public void onShardInactive(IndexShard indexShard) {
markedInactive.set(true);
primaryRef.get().flush(new FlushRequest());
}
}
);
primaryRef.set(primary);
recoverShardFromStore(primary);
for (int i = 0; i < 3; i++) {
indexDoc(primary, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
primary.refresh("test"); // produce segments
}
List<Segment> segments = primary.segments(false);
Set<String> names = new HashSet<>();
for (Segment segment : segments) {
assertFalse(segment.committed);
assertTrue(segment.search);
names.add(segment.getName());
}
assertEquals(3, segments.size());
primary.flush(new FlushRequest());
primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(false));
primary.refresh("test");
segments = primary.segments(false);
for (Segment segment : segments) {
if (names.contains(segment.getName())) {
assertTrue(segment.committed);
assertFalse(segment.search);
} else {
assertFalse(segment.committed);
assertTrue(segment.search);
}
}
assertEquals(4, segments.size());
assertFalse(markedInactive.get());
assertBusy(() -> {
primary.checkIdle(0);
assertFalse(primary.isActive());
});
assertTrue(markedInactive.get());
segments = primary.segments(false);
assertEquals(1, segments.size());
for (Segment segment : segments) {
assertTrue(segment.committed);
assertTrue(segment.search);
}
closeShards(primary);
}
    public void testOnCloseStats() throws IOException {
        final IndexShard indexShard = newStartedShard(true);
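The removed testFlushOnInactive covered the path now reached through flushOnIdle (renamed from checkIdle in the hunk above): a shard that has seen no writes for the inactive interval is marked inactive and flushed. The gate itself is essentially a timestamp comparison; a rough sketch with illustrative names, not the IndexShard API:

    // illustrative only: flush when the shard has been write-idle for at least inactiveTimeNanos
    static boolean shouldFlushOnIdle(long lastWriteNanos, long nowNanos, long inactiveTimeNanos) {
        return nowNanos - lastWriteNanos >= inactiveTimeNanos;
    }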

View File

@@ -569,7 +569,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
            null,
            null,
            null,
-            null,
            primaryReplicaSyncer,
            s -> {},
            RetentionLeaseSyncer.EMPTY

View File

@@ -1,267 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.indices.flush;
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.IndexShardRoutingTable;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.UUIDs;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.index.IndexService;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.ShardId;
import org.opensearch.index.shard.ShardNotFoundException;
import org.opensearch.indices.IndicesService;
import org.opensearch.test.OpenSearchSingleNodeTestCase;
import org.opensearch.threadpool.ThreadPool;
import java.util.List;
import java.util.Map;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
public class SyncedFlushSingleNodeTests extends OpenSearchSingleNodeTestCase {
public void testModificationPreventsFlushing() throws InterruptedException {
createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
final ClusterState state = getInstanceFromNode(ClusterService.class).state();
final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
assertEquals("exactly one active shard", 1, activeShards.size());
Map<String, SyncedFlushService.PreSyncedFlushResponse> preSyncedResponses = SyncedFlushUtil.sendPreSyncRequests(
flushService,
activeShards,
state,
shardId
);
assertEquals("exactly one commit id", 1, preSyncedResponses.size());
client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
String syncId = UUIDs.randomBase64UUID();
SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
listener.latch.await();
assertNull(listener.error);
ShardsSyncedFlushResult syncedFlushResult = listener.result;
assertNotNull(syncedFlushResult);
assertEquals(0, syncedFlushResult.successfulShards());
assertEquals(1, syncedFlushResult.totalShards());
assertEquals(syncId, syncedFlushResult.syncId());
assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
// pull another commit and make sure we can't sync-flush with the old one
SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
listener = new SyncedFlushUtil.LatchedListener<>();
flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
listener.latch.await();
assertNull(listener.error);
syncedFlushResult = listener.result;
assertNotNull(syncedFlushResult);
assertEquals(0, syncedFlushResult.successfulShards());
assertEquals(1, syncedFlushResult.totalShards());
assertEquals(syncId, syncedFlushResult.syncId());
assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
public void testSingleShardSuccess() throws InterruptedException {
createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.attemptSyncedFlush(shardId, listener);
listener.latch.await();
assertNull(listener.error);
ShardsSyncedFlushResult syncedFlushResult = listener.result;
assertNotNull(syncedFlushResult);
assertEquals(1, syncedFlushResult.successfulShards());
assertEquals(1, syncedFlushResult.totalShards());
SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next();
assertTrue(response.success());
}
public void testSyncFailsIfOperationIsInFlight() throws Exception {
createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
// wait for the GCP sync spawned from the index request above to complete to avoid that request disturbing the check below
assertBusy(() -> {
assertEquals(0, shard.getLastSyncedGlobalCheckpoint());
assertEquals(0, shard.getActiveOperationsCount());
});
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.WRITE, "");
try (Releasable operationLock = fut.get()) {
SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.attemptSyncedFlush(shardId, listener);
listener.latch.await();
assertNull(listener.error);
ShardsSyncedFlushResult syncedFlushResult = listener.result;
assertNotNull(syncedFlushResult);
assertEquals(0, syncedFlushResult.successfulShards());
assertNotEquals(0, syncedFlushResult.totalShards());
assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason());
}
}
public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException {
createIndex(
"test",
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0);
assertNotNull(shard);
final ShardId shardId = shard.shardId();
final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener();
flushService.attemptSyncedFlush(new ShardId(shard.shardId().getIndex(), 1), listener);
listener.latch.await();
assertNotNull(listener.error);
assertNull(listener.result);
assertEquals(ShardNotFoundException.class, listener.error.getClass());
assertEquals("no such shard", listener.error.getMessage());
assertAcked(client().admin().indices().prepareClose("test"));
listener = new SyncedFlushUtil.LatchedListener();
flushService.attemptSyncedFlush(shardId, listener);
listener.latch.await();
assertNotNull(listener.error);
assertNull(listener.result);
assertEquals("closed", listener.error.getMessage());
listener = new SyncedFlushUtil.LatchedListener();
flushService.attemptSyncedFlush(new ShardId("index not found", "_na_", 0), listener);
listener.latch.await();
assertNotNull(listener.error);
assertNull(listener.result);
assertEquals("no such index [index not found]", listener.error.getMessage());
}
public void testFailAfterIntermediateCommit() throws InterruptedException {
createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
final ClusterState state = getInstanceFromNode(ClusterService.class).state();
final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
assertEquals("exactly one active shard", 1, activeShards.size());
Map<String, SyncedFlushService.PreSyncedFlushResponse> preSyncedResponses = SyncedFlushUtil.sendPreSyncRequests(
flushService,
activeShards,
state,
shardId
);
assertEquals("exactly one commit id", 1, preSyncedResponses.size());
if (randomBoolean()) {
client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
}
client().admin().indices().prepareFlush("test").setForce(true).get();
String syncId = UUIDs.randomBase64UUID();
final SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
listener.latch.await();
assertNull(listener.error);
ShardsSyncedFlushResult syncedFlushResult = listener.result;
assertNotNull(syncedFlushResult);
assertEquals(0, syncedFlushResult.successfulShards());
assertEquals(1, syncedFlushResult.totalShards());
assertEquals(syncId, syncedFlushResult.syncId());
assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
public void testFailWhenCommitIsMissing() throws InterruptedException {
createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
final ClusterState state = getInstanceFromNode(ClusterService.class).state();
final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
assertEquals("exactly one active shard", 1, activeShards.size());
Map<String, SyncedFlushService.PreSyncedFlushResponse> preSyncedResponses = SyncedFlushUtil.sendPreSyncRequests(
flushService,
activeShards,
state,
shardId
);
assertEquals("exactly one commit id", 1, preSyncedResponses.size());
preSyncedResponses.clear(); // wipe it...
String syncId = UUIDs.randomBase64UUID();
SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
listener.latch.await();
assertNull(listener.error);
ShardsSyncedFlushResult syncedFlushResult = listener.result;
assertNotNull(syncedFlushResult);
assertEquals(0, syncedFlushResult.successfulShards());
assertEquals(1, syncedFlushResult.totalShards());
assertEquals(syncId, syncedFlushResult.syncId());
assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
}

View File

@@ -1,126 +0,0 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.indices.flush;
import org.apache.logging.log4j.Logger;
import org.opensearch.ExceptionsHelper;
import org.opensearch.action.ActionListener;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.index.shard.ShardId;
import org.opensearch.test.InternalTestCluster;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import static org.opensearch.test.OpenSearchTestCase.assertBusy;
/** Utils for SyncedFlush */
public class SyncedFlushUtil {
private SyncedFlushUtil() {
}
/**
* Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)}
*/
public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception {
/*
* When the last indexing operation is completed, we will fire a global checkpoint sync.
* Since a global checkpoint sync request is a replication request, it will acquire an index
* shard permit on the primary when executing. If this happens while we are issuing the
* synced-flush, the synced-flush request will fail because it thinks there are
* in-flight operations. We can avoid this situation by issuing another synced-flush
* whenever the previous attempt failed due to ongoing operations on the primary.
*/
SyncedFlushService service = cluster.getInstance(SyncedFlushService.class);
AtomicReference<LatchedListener<ShardsSyncedFlushResult>> listenerHolder = new AtomicReference<>();
assertBusy(() -> {
LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>();
listenerHolder.set(listener);
service.attemptSyncedFlush(shardId, listener);
listener.latch.await();
if (listener.result != null
&& listener.result.failureReason() != null
&& listener.result.failureReason().contains("ongoing operations on primary")) {
throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry
}
});
if (listenerHolder.get().error != null) {
throw ExceptionsHelper.convertToOpenSearchException(listenerHolder.get().error);
}
return listenerHolder.get().result;
}
public static final class LatchedListener<T> implements ActionListener<T> {
public volatile T result;
public volatile Exception error;
public final CountDownLatch latch = new CountDownLatch(1);
@Override
public void onResponse(T result) {
this.result = result;
latch.countDown();
}
@Override
public void onFailure(Exception e) {
error = e;
latch.countDown();
}
}
/**
* Blocking version of {@link SyncedFlushService#sendPreSyncRequests(List, ClusterState, ShardId, ActionListener)}
*/
public static Map<String, SyncedFlushService.PreSyncedFlushResponse> sendPreSyncRequests(
SyncedFlushService service,
List<ShardRouting> activeShards,
ClusterState state,
ShardId shardId
) {
LatchedListener<Map<String, SyncedFlushService.PreSyncedFlushResponse>> listener = new LatchedListener<>();
service.sendPreSyncRequests(activeShards, state, shardId, listener);
try {
listener.latch.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (listener.error != null) {
throw ExceptionsHelper.convertToOpenSearchException(listener.error);
}
return listener.result;
}
}
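The LatchedListener above is a small async-to-blocking adapter around ActionListener; a hedged usage sketch of the blocking entry point it enables (the index name, UUID and shard number are hypothetical):

ShardId shardId = new ShardId("test", "_na_", 0); // hypothetical shard
ShardsSyncedFlushResult result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertEquals(result.totalShards(), result.successfulShards()); // every copy was sealed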

View File

@@ -178,7 +178,6 @@ import org.opensearch.indices.SystemIndices;
import org.opensearch.indices.analysis.AnalysisModule;
import org.opensearch.indices.breaker.NoneCircuitBreakerService;
import org.opensearch.indices.cluster.IndicesClusterStateService;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.indices.mapper.MapperRegistry;
import org.opensearch.indices.recovery.PeerRecoverySourceService;
import org.opensearch.indices.recovery.PeerRecoveryTargetService;
@@ -1835,7 +1834,6 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
new NodeMappingRefreshAction(transportService, metadataMappingService),
repositoriesService,
mock(SearchService.class),
new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver),
new PeerRecoverySourceService(transportService, indicesService, recoverySettings),
snapshotShardsService,
new PrimaryReplicaSyncer(

View File

@@ -95,7 +95,6 @@ import org.opensearch.http.HttpServerTransport;
import org.opensearch.index.Index;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexingPressure;
import org.opensearch.index.engine.CommitStats;
import org.opensearch.index.engine.DocIdSeqNoAndSource;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.engine.EngineTestCase;
@@ -135,7 +134,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -1252,48 +1250,11 @@ public final class InternalTestCluster extends TestCluster {
// and not all docs have been purged after the test) and inherit from
// ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures.
assertNoPendingIndexOperations();
// check that shards that have same sync id also contain same number of documents
assertSameSyncIdSameDocs();
assertAllPendingWriteLimitsReleased();
assertOpenTranslogReferences();
assertNoSnapshottedIndexCommit();
}
private void assertSameSyncIdSameDocs() {
Map<String, Long> docsOnShards = new HashMap<>();
final Collection<NodeAndClient> nodesAndClients = nodes.values();
for (NodeAndClient nodeAndClient : nodesAndClients) {
IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
for (IndexService indexService : indexServices) {
for (IndexShard indexShard : indexService) {
try {
CommitStats commitStats = indexShard.commitStats();
String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
if (syncId != null) {
long liveDocsOnShard = commitStats.getNumDocs();
if (docsOnShards.get(syncId) != null) {
assertThat(
"sync id is equal but number of docs does not match on node "
+ nodeAndClient.name
+ ". expected "
+ docsOnShards.get(syncId)
+ " but got "
+ liveDocsOnShard,
docsOnShards.get(syncId),
equalTo(liveDocsOnShard)
);
} else {
docsOnShards.put(syncId, liveDocsOnShard);
}
}
} catch (AlreadyClosedException e) {
// ignore: the engine is closed or the shard is recovering
}
}
}
}
}
private void assertAllPendingWriteLimitsReleased() throws Exception {
assertBusy(() -> {
for (NodeAndClient nodeAndClient : nodes.values()) {

View File

@@ -135,11 +135,6 @@ public final class MockIndexEventListener {
delegate.indexShardStateChanged(indexShard, previousState, currentState, reason);
}
@Override
public void onShardInactive(IndexShard indexShard) {
delegate.onShardInactive(indexShard);
}
@Override
public void beforeIndexCreated(Index index, Settings indexSettings) {
delegate.beforeIndexCreated(index, indexSettings);

View File

@@ -197,7 +197,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.opensearch.client.Requests.syncedFlushRequest;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
import static org.opensearch.common.unit.TimeValue.timeValueMillis;
@@ -1674,20 +1673,11 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
.execute(new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
} else if (maybeFlush && rarely()) {
if (randomBoolean()) {
client().admin()
.indices()
.prepareFlush(indices)
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
.execute(new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
} else {
client().admin()
.indices()
.syncedFlush(
syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()),
new LatchedActionListener<>(newLatch(inFlightAsyncOperations))
);
}
} else if (rarely()) {
client().admin()
.indices()

View File

@@ -71,7 +71,6 @@ import org.opensearch.common.xcontent.support.XContentMapValues;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.seqno.ReplicationTracker;
import org.opensearch.indices.flush.SyncedFlushService;
import org.opensearch.rest.RestStatus;
import org.opensearch.snapshots.SnapshotState;
import org.opensearch.test.OpenSearchTestCase;
@@ -101,6 +100,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
@@ -1269,13 +1269,27 @@ public abstract class OpenSearchRestTestCase extends OpenSearchTestCase {
return minVersion;
}
protected static void performSyncedFlush(String indexName, boolean retryOnConflict) throws Exception {
protected void syncedFlush(String indexName, boolean retryOnConflict) throws Exception {
final Request request = new Request("POST", indexName + "/_flush/synced");
final List<String> expectedWarnings = Collections.singletonList(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE);
final Builder options = RequestOptions.DEFAULT.toBuilder();
// 8.0 kept in warning message for legacy purposes TODO: change to 3.0
final List<String> warningMessage = Arrays.asList(
"Synced flush is deprecated and will be removed in 8.0. Use flush at /_flush or /{index}/_flush instead."
);
final List<String> expectedWarnings = Arrays.asList(
"Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version."
);
if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_2_0_0))) {
options.setWarningsHandler(warnings -> warnings.isEmpty() == false && warnings.equals(expectedWarnings) == false);
} else if (nodeVersions.stream().anyMatch(version -> version.onOrAfter(LegacyESVersion.V_7_6_0))) {
options.setWarningsHandler(
warnings -> warnings.isEmpty() == false
&& warnings.equals(expectedWarnings) == false
&& warnings.equals(warningMessage) == false
);
}
request.setOptions(options);
// We have to spin a synced-flush request because we fire the global checkpoint sync for the last write operation.
// We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
// A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
assertBusy(() -> {
try {
@@ -1291,6 +1305,26 @@
}
}
});
// ensure the global checkpoint is synced; otherwise we might trim the commit with syncId
ensureGlobalCheckpointSynced(indexName);
}
@SuppressWarnings("unchecked")
private void ensureGlobalCheckpointSynced(String index) throws Exception {
assertBusy(() -> {
Map<?, ?> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
List<Map<?, ?>> shardStats = (List<Map<?, ?>>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats);
shardStats.stream()
.map(shard -> (Map<?, ?>) XContentMapValues.extractValue("seq_no", shard))
.filter(Objects::nonNull)
.forEach(seqNoStat -> {
long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue();
long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue();
long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue();
assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo));
assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo));
});
}, 60, TimeUnit.SECONDS);
}
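The added ensureGlobalCheckpointSynced helper walks the shard-level stats response. A self-contained sketch of the seq_no shape it expects, with field names taken from the parsing code above and hypothetical sample values:

import java.util.Map;
import org.opensearch.common.xcontent.support.XContentMapValues;

// hypothetical seq_no block for one shard copy, mirroring GET {index}/_stats?level=shards
Map<String, Object> seqNo = Map.of(
    "max_seq_no", 42L,          // highest sequence number assigned on the primary
    "local_checkpoint", 42L,    // all ops up to here processed on this copy
    "global_checkpoint", 42L    // all ops up to here processed on every in-sync copy
);
Map<String, Object> shard = Map.of("seq_no", seqNo);
long max = ((Number) XContentMapValues.extractValue("seq_no.max_seq_no", shard)).longValue();
long global = ((Number) XContentMapValues.extractValue("seq_no.global_checkpoint", shard)).longValue();
// the helper's assertBusy only succeeds once local == global == max on every copy
assert max == global;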
static final Pattern CREATE_INDEX_MULTIPLE_MATCHING_TEMPLATES = Pattern.compile(