Test: force merge index in the end of IndexStatsTests.throttleStats
This works around slow IO (fsync) causing the test-framework cleanup to time out after 30 seconds when it tries to delete the index.

Closes #8528
parent fd8a56dc0b
commit 2f40b464ad
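The core of the fix is the block added at the end of throttleStats() in the diff below: force the outstanding merges to finish and flush while the test is still running, so the framework's cleanup only has to delete an idle index instead of waiting on slow fsync. A minimal sketch of that ending, reusing the ElasticsearchIntegrationTest helpers (client(), flush(), logger) the test already depends on:

    // Force pending merges to complete and flush the result before the test returns,
    // so ElasticsearchIntegrationTest.after can delete the "test" index without
    // running into the 30-second timeout on slow IO (fsync).
    client().admin().indices().prepareOptimize("test")
            .setWaitForMerge(true)   // block until the forced merge has finished
            .get();
    flush();                         // flush the merged segments now, not during cleanup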
@@ -36,7 +36,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamInput;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider;
@@ -331,61 +330,52 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
 
     @Test
     public void throttleStats() throws Exception {
-        String luceneIWLevel = Loggers.getLogger("lucene.iw").getLevel();
-        Loggers.getLogger("lucene.iw").setLevel("TRACE");
-        String msLevel = Loggers.getLogger("index.merge.scheduler").getLevel();
-        Loggers.getLogger("index.merge.scheduler").setLevel("TRACE");
-        String indexShardLevel = Loggers.getLogger("index.shard.service").getLevel();
-        Loggers.getLogger("index.shard.service").setLevel("TRACE");
-        String engineLevel = Loggers.getLogger("test.engine").getLevel();
-        Loggers.getLogger("test.engine").setLevel("TRACE");
-
-        try {
-            assertAcked(prepareCreate("test")
-                    .setSettings(ImmutableSettings.builder()
-                            .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge")
-                            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
-                            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
-                            .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
-                            .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
-                            .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
-                            .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "1")
-                            .put("index.merge.policy.type", "tiered")
-
-                    ));
-            ensureGreen();
-            long termUpto = 0;
-            IndicesStatsResponse stats;
-            // make sure we see throttling kicking in:
-            boolean done = false;
-            long start = System.currentTimeMillis();
-            while (!done) {
-                for(int i=0; i<100; i++) {
-                    // Provoke slowish merging by making many unique terms:
-                    StringBuilder sb = new StringBuilder();
-                    for(int j=0; j<100; j++) {
-                        sb.append(' ');
-                        sb.append(termUpto++);
-                    }
-                    client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
-                    if (i % 2 == 0) {
-                        refresh();
-                    }
-                }
-                refresh();
-                stats = client().admin().indices().prepareStats().execute().actionGet();
-                //nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
-                done = stats.getPrimaries().getIndexing().getTotal().getThrottleTimeInMillis() > 0;
-                if (System.currentTimeMillis() - start > 300*1000) { //Wait 5 minutes for throttling to kick in
-                    fail("index throttling didn't kick in after 5 minutes of intense merging");
-                }
-            }
-        } finally {
-            Loggers.getLogger("lucene.iw").setLevel(luceneIWLevel);
-            Loggers.getLogger("index.merge.scheduler").setLevel(msLevel);
-            Loggers.getLogger("index.shard.service").setLevel(indexShardLevel);
-            Loggers.getLogger("test.engine").setLevel(engineLevel);
-        }
+        assertAcked(prepareCreate("test")
+                .setSettings(ImmutableSettings.builder()
+                        .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge")
+                        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+                        .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
+                        .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
+                        .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
+                        .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "1")
+                        .put("index.merge.policy.type", "tiered")
+
+                ));
+        ensureGreen();
+        long termUpto = 0;
+        IndicesStatsResponse stats;
+        // make sure we see throttling kicking in:
+        boolean done = false;
+        long start = System.currentTimeMillis();
+        while (!done) {
+            for(int i=0; i<100; i++) {
+                // Provoke slowish merging by making many unique terms:
+                StringBuilder sb = new StringBuilder();
+                for(int j=0; j<100; j++) {
+                    sb.append(' ');
+                    sb.append(termUpto++);
+                }
+                client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
+                if (i % 2 == 0) {
+                    refresh();
+                }
+            }
+            refresh();
+            stats = client().admin().indices().prepareStats().execute().actionGet();
+            //nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+            done = stats.getPrimaries().getIndexing().getTotal().getThrottleTimeInMillis() > 0;
+            if (System.currentTimeMillis() - start > 300*1000) { //Wait 5 minutes for throttling to kick in
+                fail("index throttling didn't kick in after 5 minutes of intense merging");
+            }
+        }
+
+        // Optimize & flush and wait; else we sometimes get a "Delete Index failed - not acked"
+        // when ElasticsearchIntegrationTest.after tries to remove indices created by the test:
+        logger.info("test: now optimize");
+        client().admin().indices().prepareOptimize("test").setWaitForMerge(true).get();
+        flush();
+        logger.info("test: test done");
     }
 
     @Test