TEST: Add bwc recovery tests with synced-flush index
Although the master branch is not affected by #31482, it's helpful to have BWC tests that verify peer recovery with a synced-flush index. This commit adds the bwc tests from #31506 to the master branch. Relates #31482 Relates #31506
This commit is contained in:
parent
638b9fd88c
commit
51151027cd
|
@ -24,7 +24,9 @@ import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
import org.apache.http.util.EntityUtils;
|
import org.apache.http.util.EntityUtils;
|
||||||
import org.elasticsearch.Version;
|
import org.elasticsearch.Version;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.Response;
|
import org.elasticsearch.client.Response;
|
||||||
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.client.RestClient;
|
import org.elasticsearch.client.RestClient;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
import org.elasticsearch.common.Booleans;
|
import org.elasticsearch.common.Booleans;
|
||||||
|
@ -701,8 +703,24 @@ public class FullClusterRestartIT extends ESRestTestCase {
|
||||||
|
|
||||||
// make sure all recoveries are done
|
// make sure all recoveries are done
|
||||||
ensureGreen(index);
|
ensureGreen(index);
|
||||||
|
// Recovering a synced-flush index from 5.x to 6.x might be subtle as a 5.x index commit does not have all 6.x commit tags.
|
||||||
|
if (randomBoolean()) {
|
||||||
|
// We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
|
||||||
|
// A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit.
|
||||||
|
assertBusy(() -> {
|
||||||
|
try {
|
||||||
|
Response resp = client().performRequest(new Request("POST", index + "/_flush/synced"));
|
||||||
|
Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards");
|
||||||
|
assertThat(result.get("successful"), equalTo(result.get("total")));
|
||||||
|
assertThat(result.get("failed"), equalTo(0));
|
||||||
|
} catch (ResponseException ex) {
|
||||||
|
throw new AssertionError(ex); // cause assert busy to retry
|
||||||
|
}
|
||||||
|
});
|
||||||
|
} else {
|
||||||
// Explicitly flush so we're sure to have a bunch of documents in the Lucene index
|
// Explicitly flush so we're sure to have a bunch of documents in the Lucene index
|
||||||
client().performRequest("POST", "/_flush");
|
assertOK(client().performRequest(new Request("POST", "/_flush")));
|
||||||
|
}
|
||||||
if (shouldHaveTranslog) {
|
if (shouldHaveTranslog) {
|
||||||
// Update a few documents so we are sure to have a translog
|
// Update a few documents so we are sure to have a translog
|
||||||
indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false,
|
indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false,
|
||||||
|
|
|
@ -22,7 +22,9 @@ import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
import org.elasticsearch.Version;
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.action.support.PlainActionFuture;
|
import org.elasticsearch.action.support.PlainActionFuture;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.Response;
|
import org.elasticsearch.client.Response;
|
||||||
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||||
|
@ -239,4 +241,34 @@ public class RecoveryIT extends AbstractRollingTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * BWC test: creates a one-replica index on the old cluster, seals it with a
 * synced flush, and verifies that peer recovery of the synced-flush index
 * succeeds across the rolling upgrade (cluster health returns to green).
 */
public void testRecoverSyncedFlushIndex() throws Exception {
    final String index = "recover_synced_flush_index";
    if (CLUSTER_TYPE == ClusterType.OLD) {
        Settings.Builder settings = Settings.builder()
            .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
            .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
            // if the node with the replica is the first to be restarted, while a replica is still recovering
            // then delayed allocation will kick in. When the node comes back, the master will search for a copy
            // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
            // before timing out
            .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
            .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
        createIndex(index, settings.build());
        indexDocs(index, 0, randomInt(5));
        // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
        // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
        assertBusy(() -> {
            try {
                Response resp = client().performRequest(new Request("POST", index + "/_flush/synced"));
                Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards");
                // every shard copy must report a successful synced flush before we proceed
                assertThat(result.get("successful"), equalTo(result.get("total")));
                assertThat(result.get("failed"), equalTo(0));
            } catch (ResponseException ex) {
                throw new AssertionError(ex); // cause assert busy to retry
            }
        });
    }
    // in MIXED/UPGRADED phases: verify peer recovery of the synced-flush index completes
    ensureGreen(index);
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue