Disable SegmentReader ram usage by default even if -ea is provided

Simon Willnauer 2013-12-23 11:00:34 +01:00
parent af10b65fe1
commit 2d77e2a37e
3 changed files with 78 additions and 63 deletions

RobinEngine.java

@@ -1118,14 +1118,11 @@ public class RobinEngine extends AbstractIndexShardComponent implements Engine {
         }
     }
-    static final boolean allowRamBytesUsed;
+    /*this is only used by one test right now and should go away entirely once we update lucene*/
+    private static boolean allowRamBytesUsed = false;
-    static {
-        assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_46 :
-                "when upgrading to a new lucene version, check if ramBytes is fixed, see https://issues.apache.org/jira/browse/LUCENE-5373";
-        boolean xAllowRamBytesUsed = false;
-        assert xAllowRamBytesUsed = true;
-        allowRamBytesUsed = xAllowRamBytesUsed;
-    }
     private long getReaderRamBytesUsed(AtomicReaderContext reader) {
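The static block removed above relied on a common Java idiom for detecting whether the JVM was started with assertions enabled (-ea): an assignment embedded in an assert statement only executes when assertions are active, so allowRamBytesUsed ended up true under -ea and false otherwise. After this commit the flag is simply hard-coded to false, which is what the title means by "even if -ea is provided". Below is a minimal, standalone sketch of that detection idiom; the class and field names are illustrative and not taken from the commit.

// Sketch only: demonstrates the assertions-enabled detection idiom used by the removed static block.
public class AssertionsEnabledSketch {

    static final boolean ASSERTIONS_ENABLED;

    static {
        boolean enabled = false;
        // This is an assignment, not a comparison: the expression is evaluated
        // (flipping the local to true) only when assertions are enabled.
        assert enabled = true;
        ASSERTIONS_ENABLED = enabled;
    }

    public static void main(String[] args) {
        System.out.println("assertions enabled: " + ASSERTIONS_ENABLED);
    }
}

Running java -ea AssertionsEnabledSketch prints true; without -ea it prints false.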

RobinEngineIntegrationTest.java

@@ -38,6 +38,7 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.hamcrest.Matchers;
 import org.junit.Test;
+import java.lang.reflect.Field;
 import java.util.Collection;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -47,11 +48,15 @@ public class RobinEngineIntegrationTest extends ElasticsearchIntegrationTest {
     @Test
     public void testSettingLoadBloomFilterDefaultTrue() throws Exception {
+        Field allowRamBytesUsed = RobinEngine.class.getDeclaredField("allowRamBytesUsed");
+        allowRamBytesUsed.setAccessible(true);
+        allowRamBytesUsed.set(RobinEngine.class, Boolean.TRUE);
+        try {
         client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1)).get();
         client().prepareIndex("test", "foo").setSource("field", "foo").get();
         ensureGreen();
         refresh();
         IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
         final long segmentsMemoryWithBloom = stats.getTotal().getSegments().getMemoryInBytes();
         logger.info("segments with bloom: {}", segmentsMemoryWithBloom);
@@ -79,10 +84,17 @@ public class RobinEngineIntegrationTest extends ElasticsearchIntegrationTest {
                 return newSegmentsMemoryWithBloom == segmentsMemoryWithBloom;
             }
         });
+        } finally {
+            allowRamBytesUsed.set(RobinEngine.class, Boolean.FALSE);
+        }
     }
     @Test
     public void testSettingLoadBloomFilterDefaultFalse() throws Exception {
+        Field allowRamBytesUsed = RobinEngine.class.getDeclaredField("allowRamBytesUsed");
+        allowRamBytesUsed.setAccessible(true);
+        allowRamBytesUsed.set(RobinEngine.class, Boolean.TRUE);
+        try {
         client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("number_of_replicas", 0).put("number_of_shards", 1).put(CodecService.INDEX_CODEC_BLOOM_LOAD, false)).get();
         client().prepareIndex("test", "foo").setSource("field", "foo").get();
         ensureGreen();
@@ -115,6 +127,10 @@ public class RobinEngineIntegrationTest extends ElasticsearchIntegrationTest {
                 return newSegmentsMemoryWithoutBloom == segmentsMemoryWithoutBloom;
             }
         });
+        } finally {
+            allowRamBytesUsed.set(RobinEngine.class, Boolean.FALSE);
+        }
     }
     @Test
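Since allowRamBytesUsed is now a plain mutable private static field instead of a value derived from -ea, both bloom-filter tests enable it via reflection and restore the default in a finally block so that later tests are not affected. A generic sketch of that reflection pattern follows; Config and DEBUG are hypothetical stand-ins for RobinEngine and allowRamBytesUsed.

import java.lang.reflect.Field;

// Hypothetical holder of a private static flag, standing in for the engine class.
class Config {
    private static boolean DEBUG = false;

    static boolean isDebug() {
        return DEBUG;
    }
}

public class StaticFlagToggleSketch {

    public static void main(String[] args) throws Exception {
        // Look up the private static field and make it writable for the duration of the override.
        Field debug = Config.class.getDeclaredField("DEBUG");
        debug.setAccessible(true);
        debug.set(null, Boolean.TRUE); // target may be null (or the class) for static fields
        try {
            System.out.println("flag while toggled: " + Config.isDebug()); // true
        } finally {
            // Always restore the default so code running afterwards sees the original value.
            debug.set(null, Boolean.FALSE);
        }
        System.out.println("flag after restore: " + Config.isDebug()); // false
    }
}

Note that this only works because the field is no longer declared final; Field.set on a static final primitive field throws IllegalAccessException.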

SimpleIndexStatsTests.java

@@ -19,6 +19,7 @@
 package org.elasticsearch.indices.stats;
+import org.apache.lucene.util.Version;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
 import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
@@ -207,6 +208,7 @@ public class SimpleIndexStatsTests extends ElasticsearchIntegrationTest {
         assertThat(stats.getTotal().getSegments(), notNullValue());
         assertThat(stats.getTotal().getSegments().getCount(), equalTo(10l));
+        assumeTrue(org.elasticsearch.Version.CURRENT.luceneVersion != Version.LUCENE_46);
         assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0l));
     }
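The added assumeTrue call keeps the segment-memory assertion from running while the build is still on Lucene 4.6, where SegmentReader ram accounting is the subject of LUCENE-5373 (the same issue referenced by the static block removed from RobinEngine). When a JUnit assumption fails, the rest of the test is reported as skipped rather than failed. A small self-contained illustration of that mechanism follows; the test and constant names are made up for the example.

import org.junit.Test;

import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;

// Illustrative only: shows how a failing assumption skips a JUnit test instead of failing it.
public class AssumeVersionSketch {

    // Pretend "current" Lucene version; change it to let the assertion actually run.
    private static final String LUCENE_VERSION = "4.6";

    @Test
    public void segmentMemoryIsReportedOnFixedLuceneVersions() {
        long segmentsMemoryInBytes = 0L; // stand-in for the stats value checked by the real test

        // On the affected version the test is marked as skipped here and never reaches the assertion.
        assumeTrue(!"4.6".equals(LUCENE_VERSION));

        assertTrue("segment memory should be reported", segmentsMemoryInBytes > 0L);
    }
}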