[Test] Speed up RecoveryWhileUnderLoadTests to run with fewer documents unless tests.nightly=true

Simon Willnauer 2014-03-28 11:58:06 +01:00
parent e60ca8d8d7
commit e3317f2c6b
1 changed file with 42 additions and 34 deletions
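The heart of the change is scaledRandomIntBetween, which replaces hard-coded document counts with values that track the test framework's scale multiplier: small on everyday runs, large when tests.nightly=true raises the multiplier. A minimal sketch of the scaling idea, with a hypothetical helper name and formula (the real implementation lives in the randomized-testing framework):

    import java.util.Random;

    // Illustrative only: biases the draw toward the cheap end of the range on
    // default runs and toward the expensive end on nightly runs.
    class ScaledRandomSketch {
        static int scaledRandomIntBetween(Random random, int min, int max, boolean nightly) {
            double point = nightly
                    ? 0.5 + 0.5 * random.nextDouble()  // sample the upper half
                    : 0.5 * random.nextDouble();       // sample the lower half
            return min + (int) Math.round(point * (max - min));
        }

        public static void main(String[] args) {
            Random random = new Random(42);
            System.out.println("default: " + scaledRandomIntBetween(random, 200, 20000, false));
            System.out.println("nightly: " + scaledRandomIntBetween(random, 200, 20000, true));
        }
    }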


@@ -99,18 +99,20 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             writers[i].start();
         }
         try {
-            logger.info("--> waiting for 2000 docs to be indexed ...");
-            waitForDocs(2000);
-            logger.info("--> 2000 docs indexed");
+            final int totalNumDocs = scaledRandomIntBetween(200, 20000);
+            int waitFor = totalNumDocs / 3;
+            logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+            waitForDocs(waitFor);
+            logger.info("--> {} docs indexed", waitFor);
             logger.info("--> flushing the index ....");
             // now flush, just to make sure we have some data in the index, not just translog
             client().admin().indices().prepareFlush().execute().actionGet();
-            logger.info("--> waiting for 4000 docs to be indexed ...");
-            waitForDocs(4000);
-            logger.info("--> 4000 docs indexed");
+            waitFor += totalNumDocs / 3;
+            logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+            waitForDocs(waitFor);
+            logger.info("--> {} docs indexed", waitFor);
             logger.info("--> allow 2 nodes for index [test] ...");
             // now start another node, while we index
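The fixed 2000/4000/15000 checkpoints become proportional thirds of the randomized total, so the flush and the node changes still happen at the same relative points in the run at any scale. For a concrete draw:

    // Assuming totalNumDocs came out as 9000, the three checkpoints are:
    int totalNumDocs = 9000;
    int waitFor = totalNumDocs / 3;   // 3000 docs -> flush the index
    waitFor += totalNumDocs / 3;      // 6000 docs -> allow a second node
    // ... and finally waitForDocs(totalNumDocs) waits for all 9000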
@@ -120,9 +122,9 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             // make sure the cluster state is green, and all has been recovered
             assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
-            logger.info("--> waiting for 15000 docs to be indexed ...");
-            waitForDocs(15000);
-            logger.info("--> 15000 docs indexed");
+            logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
+            waitForDocs(totalNumDocs);
+            logger.info("--> {} docs indexed", totalNumDocs);
             logger.info("--> marking and waiting for indexing threads to stop ...");
             stop.set(true);
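waitForDocs is a helper of the test class whose body is outside this diff; presumably it polls the indexed-document count until the threshold is reached. A self-contained sketch under that assumption (the supplier and timeout are illustrative; the real helper queries the cluster):

    import java.util.function.LongSupplier;

    // Illustrative waitForDocs-style helper: poll a count source until it
    // reaches the target or a deadline passes.
    class WaitForDocsSketch {
        static void waitForDocs(LongSupplier docCount, long numDocs, long timeoutMillis)
                throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (docCount.getAsLong() < numDocs) {
                if (System.currentTimeMillis() > deadline) {
                    throw new AssertionError("timed out waiting for " + numDocs + " docs");
                }
                Thread.sleep(100); // back off between polls
            }
        }
    }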
@@ -179,20 +181,21 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             };
             writers[i].start();
         }
+        final int totalNumDocs = scaledRandomIntBetween(200, 20000);
+        int waitFor = totalNumDocs / 3;
         try {
-            logger.info("--> waiting for 2000 docs to be indexed ...");
-            waitForDocs(2000);
-            logger.info("--> 2000 docs indexed");
+            logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+            waitForDocs(waitFor);
+            logger.info("--> {} docs indexed", waitFor);
             logger.info("--> flushing the index ....");
             // now flush, just to make sure we have some data in the index, not just translog
             client().admin().indices().prepareFlush().execute().actionGet();
-            logger.info("--> waiting for 4000 docs to be indexed ...");
-            waitForDocs(4000);
-            logger.info("--> 4000 docs indexed");
+            waitFor += totalNumDocs / 3;
+            logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+            waitForDocs(waitFor);
+            logger.info("--> {} docs indexed", waitFor);
             logger.info("--> allow 4 nodes for index [test] ...");
             allowNodes("test", 4);
@@ -200,9 +203,9 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=4").execute().actionGet().isTimedOut(), equalTo(false));
-            logger.info("--> waiting for 15000 docs to be indexed ...");
-            waitForDocs(15000);
-            logger.info("--> 15000 docs indexed");
+            logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
+            waitForDocs(totalNumDocs);
+            logger.info("--> {} docs indexed", totalNumDocs);
             stop.set(true);
             stopLatch.await();
@@ -264,19 +267,21 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             };
             writers[i].start();
         }
+        final int totalNumDocs = scaledRandomIntBetween(200, 20000);
+        int waitFor = totalNumDocs / 3;
         try {
-            logger.info("--> waiting for 2000 docs to be indexed ...");
-            waitForDocs(2000);
-            logger.info("--> 2000 docs indexed");
+            logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+            waitForDocs(waitFor);
+            logger.info("--> {} docs indexed", waitFor);
             logger.info("--> flushing the index ....");
             // now flush, just to make sure we have some data in the index, not just translog
             client().admin().indices().prepareFlush().execute().actionGet();
-            logger.info("--> waiting for 4000 docs to be indexed ...");
-            waitForDocs(4000);
-            logger.info("--> 4000 docs indexed");
+            waitFor += totalNumDocs / 3;
+            logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+            waitForDocs(waitFor);
+            logger.info("--> {} docs indexed", waitFor);
             // now start more nodes, while we index
             logger.info("--> allow 4 nodes for index [test] ...");
@@ -286,10 +291,9 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForGreenStatus().setWaitForNodes(">=4").execute().actionGet().isTimedOut(), equalTo(false));
-            logger.info("--> waiting for 15000 docs to be indexed ...");
-            waitForDocs(15000);
-            logger.info("--> 15000 docs indexed");
+            logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
+            waitForDocs(totalNumDocs);
+            logger.info("--> {} docs indexed", totalNumDocs);
             // now, shutdown nodes
             logger.info("--> allow 3 nodes for index [test] ...");
             allowNodes("test", 3);
@@ -341,6 +345,7 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
         final CountDownLatch stopLatch = new CountDownLatch(writers.length);
         logger.info("--> starting {} indexing threads", writers.length);
         final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<>();
+        final CountDownLatch startLatch = new CountDownLatch(1);
         for (int i = 0; i < writers.length; i++) {
             final int indexerId = i;
             final Client client = client();
@@ -349,6 +354,7 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             public void run() {
                 long id = -1;
                 try {
+                    startLatch.await();
                     logger.info("**** starting indexing thread {}", indexerId);
                     while (!stop.get()) {
                         id = idGenerator.incrementAndGet();
@@ -369,8 +375,10 @@ public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
             }
         }
         try {
-            final int numDocs = between(10000, 50000);
-            for (int i = 0; i < numDocs; i += between(100, 1000)) {
+            final int numDocs = scaledRandomIntBetween(200, 50000);
+            logger.info("--> indexing {} docs in total ...", numDocs);
+            startLatch.countDown();
+            for (int i = 0; i < numDocs; i += scaledRandomIntBetween(100, Math.min(1000, numDocs))) {
                 assertThat(failures, emptyIterable());
                 logger.info("--> waiting for {} docs to be indexed ...", i);
                 waitForDocs(i);
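The added startLatch is a one-shot start gate: every writer thread blocks in await() at the top of run(), and the test thread releases them all with countDown() only after it has computed and logged numDocs. A standalone demonstration of the pattern:

    import java.util.concurrent.CountDownLatch;

    // Standalone demo of the start gate: workers park on await() until the
    // coordinator calls countDown(), so none of them races ahead of setup.
    class StartGateDemo {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch startLatch = new CountDownLatch(1);
            for (int i = 0; i < 4; i++) {
                final int id = i;
                new Thread(() -> {
                    try {
                        startLatch.await(); // blocks until released below
                        System.out.println("indexing thread " + id + " started");
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }).start();
            }
            System.out.println("releasing writers ...");
            startLatch.countDown(); // all writers proceed together
        }
    }

The loop's new step, scaledRandomIntBetween(100, Math.min(1000, numDocs)), also guards the small-scale case: with numDocs as low as 200, a step of up to 1000 would skip past every intermediate checkpoint in a single stride.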