[TEST] Add assertBusy when checking for pending operation counter after tests

Currently, pending operations can still be completing after a test with a
disruption scheme has finished. This commit waits for the pending operation
counter to reach zero after the tests are run.
Areek Zillur 2016-11-10 18:35:52 -05:00
parent 5b4c3fb1ac
commit 7ed195fe93
4 changed files with 35 additions and 28 deletions
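The fix relies on `ESTestCase.assertBusy`, which keeps re-running an assertion until it passes or a timeout expires. A minimal sketch of that retry loop (illustrative only, not the Elasticsearch implementation; the class name, backoff strategy, and parameters here are assumptions):

```java
import java.util.concurrent.TimeUnit;

public final class BusyAssert {

    // Re-run the assertion until it passes or maxWait elapses, rethrowing
    // the most recent AssertionError when time runs out.
    public static void assertBusy(Runnable assertion, long maxWait, TimeUnit unit)
            throws InterruptedException {
        long deadlineNanos = System.nanoTime() + unit.toNanos(maxWait);
        long sleepMillis = 1;
        while (true) {
            try {
                assertion.run();
                return; // assertion passed, stop polling
            } catch (AssertionError e) {
                if (System.nanoTime() >= deadlineNanos) {
                    throw e; // timed out: surface the last failure
                }
                Thread.sleep(sleepMillis);
                sleepMillis = Math.min(sleepMillis * 2, 500); // simple exponential backoff
            }
        }
    }
}
```

With that shape, the shard-counter check below becomes the lambda body: right after a disruption test it may legitimately fail while in-flight replication operations drain, and the retry loop absorbs that window instead of failing the test immediately.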


@@ -175,7 +175,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }
 
     @Override
-    protected void beforeIndexDeletion() throws IOException {
+    protected void beforeIndexDeletion() throws Exception {
         if (disableBeforeIndexDeletion == false) {
             super.beforeIndexDeletion();
         }


@@ -577,7 +577,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return Collections.emptySet();
     }
 
-    protected void beforeIndexDeletion() throws IOException {
+    protected void beforeIndexDeletion() throws Exception {
         cluster().beforeIndexDeletion();
     }


@@ -1020,7 +1020,7 @@ public final class InternalTestCluster extends TestCluster {
     }
 
     @Override
-    public void beforeIndexDeletion() throws IOException {
+    public void beforeIndexDeletion() throws Exception {
         // Check that the operations counter on index shard has reached 0.
         // The assumption here is that after a test there are no ongoing write operations.
         // test that have ongoing write operations after the test (for example because ttl is used
@@ -1055,33 +1055,40 @@ public final class InternalTestCluster extends TestCluster {
         }
     }
 
-    private void assertShardIndexCounter() throws IOException {
-        final Collection<NodeAndClient> nodesAndClients = nodes.values();
-        for (NodeAndClient nodeAndClient : nodesAndClients) {
-            IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
-            for (IndexService indexService : indexServices) {
-                for (IndexShard indexShard : indexService) {
-                    int activeOperationsCount = indexShard.getActiveOperationsCount();
-                    if (activeOperationsCount > 0) {
-                        TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager();
-                        DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode();
-                        List<TaskInfo> taskInfos = taskManager.getTasks().values().stream()
-                            .filter(task -> task instanceof ReplicationTask)
-                            .map(task -> task.taskInfo(localNode.getId(), true))
-                            .collect(Collectors.toList());
-                        ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList());
-                        XContentBuilder builder = XContentFactory.jsonBuilder()
-                            .prettyPrint()
-                            .startObject()
-                            .value(response)
-                            .endObject();
-                        throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " +
-                            nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" +
-                            builder.string());
-                    }
-                }
-            }
-        }
+    private void assertShardIndexCounter() throws Exception {
+        assertBusy(() -> {
+            final Collection<NodeAndClient> nodesAndClients = nodes.values();
+            for (NodeAndClient nodeAndClient : nodesAndClients) {
+                IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+                for (IndexService indexService : indexServices) {
+                    for (IndexShard indexShard : indexService) {
+                        int activeOperationsCount = indexShard.getActiveOperationsCount();
+                        if (activeOperationsCount > 0) {
+                            TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager();
+                            DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode();
+                            List<TaskInfo> taskInfos = taskManager.getTasks().values().stream()
+                                .filter(task -> task instanceof ReplicationTask)
+                                .map(task -> task.taskInfo(localNode.getId(), true))
+                                .collect(Collectors.toList());
+                            ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList());
+                            XContentBuilder builder = null;
+                            try {
+                                builder = XContentFactory.jsonBuilder()
+                                    .prettyPrint()
+                                    .startObject()
+                                    .value(response)
+                                    .endObject();
+                                throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " +
+                                    nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" +
+                                    builder.string());
+                            } catch (IOException e) {
+                                throw new RuntimeException("caught exception while building response [" + response + "]", e);
+                            }
+                        }
+                    }
+                }
+            }
+        });
     }
 
     private void randomlyResetClients() throws IOException {
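A side effect of moving the body into a lambda: the `XContentBuilder` calls declare the checked `IOException`, which a `Runnable`-style lambda cannot let escape, hence the new `builder = null` plus try/catch that rethrows it as a `RuntimeException`. A self-contained sketch of that constraint (the class and method names here are hypothetical, for illustration only):

```java
import java.io.IOException;

public final class LambdaCheckedExceptionDemo {

    // Stand-in for the XContentBuilder calls, which declare IOException.
    static String buildReport() throws IOException {
        return "{\"tasks\":[]}";
    }

    public static void main(String[] args) {
        // Runnable.run() declares no checked exceptions, so the lambda must
        // catch IOException itself and rethrow it unchecked -- the same
        // pattern the diff above introduces inside the assertBusy lambda.
        Runnable body = () -> {
            try {
                System.out.println(buildReport());
            } catch (IOException e) {
                throw new RuntimeException("caught exception while building response", e);
            }
        };
        body.run();
    }
}
```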


@@ -82,7 +82,7 @@ public abstract class TestCluster implements Closeable {
     /**
      * Assertions that should run before the cluster is wiped should be called in this method
      */
-    public void beforeIndexDeletion() throws IOException {
+    public void beforeIndexDeletion() throws Exception {
     }
 
     /**