Add integration test for elasticsearch issue #14387
This commit is contained in:
parent
219fe8f30f
commit
7b5e323ec0
|
@ -77,7 +77,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
|
||||||
}
|
}
|
||||||
String indexName = recoveryState.getShardId().getIndex();
|
String indexName = recoveryState.getShardId().getIndex();
|
||||||
if (!shardResponses.containsKey(indexName)) {
|
if (!shardResponses.containsKey(indexName)) {
|
||||||
shardResponses.put(indexName, new ArrayList<RecoveryState>());
|
shardResponses.put(indexName, new ArrayList<>());
|
||||||
}
|
}
|
||||||
if (request.activeOnly()) {
|
if (request.activeOnly()) {
|
||||||
if (recoveryState.getStage() != RecoveryState.Stage.DONE) {
|
if (recoveryState.getStage() != RecoveryState.Stage.DONE) {
|
||||||
|
|
|
@ -21,10 +21,16 @@ package org.elasticsearch.recovery;
|
||||||
|
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||||
|
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
|
||||||
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
|
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||||
import org.elasticsearch.common.Priority;
|
import org.elasticsearch.common.Priority;
|
||||||
import org.elasticsearch.common.collect.MapBuilder;
|
import org.elasticsearch.common.collect.MapBuilder;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
import org.elasticsearch.common.unit.TimeValue;
|
||||||
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
||||||
|
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||||
import org.elasticsearch.test.ESIntegTestCase;
|
import org.elasticsearch.test.ESIntegTestCase;
|
||||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||||
import org.elasticsearch.test.ESIntegTestCase.Scope;
|
import org.elasticsearch.test.ESIntegTestCase.Scope;
|
||||||
|
@ -124,4 +130,36 @@ public class FullRollingRestartIT extends ESIntegTestCase {
|
||||||
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000l);
|
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000l);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testNoRebalanceOnRollingRestart() throws Exception {
    // Regression test: restarting a data node must not trigger shard relocations.
    // see https://github.com/elastic/elasticsearch/issues/14387
    internalCluster().startMasterOnlyNode(Settings.EMPTY);
    internalCluster().startNodesAsync(3, Settings.builder().put("node.master", false).build()).get();
    /*
     * We start 3 nodes and a dedicated master. Restart one of the data-nodes and ensure that we got no relocations.
     * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject
     * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated.
     * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen.
     */
    // The 1-minute delayed-allocation timeout keeps the restarting node's shards
    // unassigned (rather than reallocated elsewhere) until it rejoins.
    prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "6").put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0").put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueMinutes(1))).get();

    for (int i = 0; i < 100; i++) {
        client().prepareIndex("test", "type1", Long.toString(i))
                .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
    }
    ensureGreen();
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    // Baseline check: no shard recovery so far was caused by a relocation.
    RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
    for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
        assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getType() != RecoveryState.Type.RELOCATION);
    }
    internalCluster().restartRandomDataNode();
    ensureGreen();
    ClusterState afterState = client().admin().cluster().prepareState().get().getState();

    // After the rolling restart, assert again that every recovery was a plain
    // store/replica recovery and not a relocation; the failure message dumps the
    // cluster state before and after the restart for diagnosis.
    recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
    for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
        assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getType() != RecoveryState.Type.RELOCATION);
    }
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue