Remove testNodeFailuresAreProcessedOnce

This test was not checking what it was supposed to check anyway.
This commit is contained in:
Yannick Welsch 2019-05-22 14:45:26 +02:00
parent 250973af1d
commit eae58c477c
1 changed file with 0 additions and 43 deletions

View File

@ -42,18 +42,14 @@ import org.elasticsearch.test.TestCustomMetaData;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.transport.RemoteTransportException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
@ -105,45 +101,6 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster));
}
/**
 * Verifies that the failures of multiple nodes are batched into at most two
 * cluster-state updates. The registered state applier blocks until both
 * non-master nodes have been stopped, so the pending node-failure events
 * accumulate on the master and can be processed together.
 */
public void testNodeFailuresAreProcessedOnce() throws IOException {
    // Aggressive fault-detection settings so stopped nodes are noticed quickly.
    Settings defaultSettings = Settings.builder()
        .put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s")
        .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1")
        .build();
    Settings masterNodeSettings = Settings.builder()
        .put(Node.NODE_DATA_SETTING.getKey(), false)
        .put(defaultSettings)
        .build();
    String master = internalCluster().startNode(masterNodeSettings);
    // Renamed from the original's misspelled "dateNodeSettings" — these
    // configure data-only (non-master-eligible) nodes.
    Settings dataNodeSettings = Settings.builder()
        .put(Node.NODE_MASTER_SETTING.getKey(), false)
        .put(defaultSettings)
        .build();
    internalCluster().startNodes(2, dataNodeSettings);
    client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
    final AtomicInteger numUpdates = new AtomicInteger();
    final CountDownLatch nodesStopped = new CountDownLatch(1);
    clusterService.addStateApplier(event -> {
        numUpdates.incrementAndGet();
        try {
            // block until both nodes have stopped to accumulate node failures
            nodesStopped.await();
        } catch (InterruptedException e) {
            // Restore the interrupt flag rather than silently swallowing it
            // (the original "//meh" dropped the interrupt status).
            Thread.currentThread().interrupt();
        }
    });
    internalCluster().stopRandomNonMasterNode();
    internalCluster().stopRandomNonMasterNode();
    nodesStopped.countDown();
    client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // wait for all to be processed
    assertThat(numUpdates.get(), either(equalTo(1)).or(equalTo(2))); // due to batching, both nodes can be handled in same CS update
}
public void testHandleNodeJoin_incompatibleClusterState()
throws InterruptedException, ExecutionException, TimeoutException {
String masterNode = internalCluster().startMasterOnlyNode();