From 4110c1ab6c16901031f99d717f22b08930b6b13d Mon Sep 17 00:00:00 2001
From: Boaz Leskes
Date: Sun, 1 Mar 2015 12:10:35 +0100
Subject: [PATCH] Test: InternalEngineTests.testSegmentsWithMergeFlag should close its translog

Also improve test suite tearDown to not fail if setUp didn't create engines, translogs etc.
---
 .../index/engine/InternalEngineTests.java | 112 +++++++++---------
 1 file changed, 53 insertions(+), 59 deletions(-)

diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index ea4e4b29190..a1e5889ef68 100644
--- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -95,6 +95,7 @@ import static org.elasticsearch.test.ElasticsearchTestCase.assertBusy;
 import static org.elasticsearch.test.ElasticsearchTestCase.terminate;
 import static org.hamcrest.Matchers.*;
 
+@TestLogging("index.translog:TRACE")
 public class InternalEngineTests extends ElasticsearchLuceneTestCase {
 
     public static final String TRANSLOG_PRIMARY_LOCATION = "work/fs-translog/JVM_" + CHILD_JVM_ID + "/primary";
@@ -179,14 +180,9 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
     @After
     public void tearDown() throws Exception {
         super.tearDown();
-        replicaEngine.close();
-        storeReplica.close();
-
-        engine.close();
-        store.close();
-
-        translog.close();
-        replicaTranslog.close();
+        IOUtils.close(
+                replicaEngine, storeReplica, replicaTranslog,
+                engine, store, translog);
         terminate(threadPool);
     }
 
@@ -430,60 +426,59 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
     @Test
     public void testSegmentsWithMergeFlag() throws Exception {
-        final Store store = createStore();
         ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, new IndexSettingsService(shardId.index(), EMPTY_SETTINGS));
         IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
-        final Engine engine = createEngine(indexSettingsService, store, createTranslog(), mergeSchedulerProvider);
+        try (Store store = createStore();
+             Translog translog = createTranslog();
+             Engine engine = createEngine(indexSettingsService, store, translog, mergeSchedulerProvider)) {
+            ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, false);
+            Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+            engine.index(index);
+            engine.flush();
+            assertThat(engine.segments(false).size(), equalTo(1));
+            index = new Engine.Index(null, newUid("2"), doc);
+            engine.index(index);
+            engine.flush();
+            List<Segment> segments = engine.segments(false);
+            assertThat(segments.size(), equalTo(2));
+            for (Segment segment : segments) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
+            index = new Engine.Index(null, newUid("3"), doc);
+            engine.index(index);
+            engine.flush();
+            segments = engine.segments(false);
+            assertThat(segments.size(), equalTo(3));
+            for (Segment segment : segments) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
+
+            index = new Engine.Index(null, newUid("4"), doc);
+            engine.index(index);
+            engine.flush();
+            final long gen1 = store.readLastCommittedSegmentsInfo().getGeneration();
+            // now, optimize and wait for merges, see that we have no merge flag
+            engine.forceMerge(true);
+
+            for (Segment segment : engine.segments(false)) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
+            // we could have multiple underlying merges, so the generation may increase more than once
+            assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > gen1);
+
+            final boolean flush = randomBoolean();
+            final long gen2 = store.readLastCommittedSegmentsInfo().getGeneration();
+            engine.forceMerge(flush);
+            for (Segment segment : engine.segments(false)) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
+
+            if (flush) {
+                // we should have had just 1 merge, so last generation should be exact
+                assertEquals(gen2 + 1, store.readLastCommittedSegmentsInfo().getLastGeneration());
+            }
+        }
-        index = new Engine.Index(null, newUid("3"), doc);
-        engine.index(index);
-        engine.flush();
-        segments = engine.segments(false);
-        assertThat(segments.size(), equalTo(3));
-        for (Segment segment : segments) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
-
-        index = new Engine.Index(null, newUid("4"), doc);
-        engine.index(index);
-        engine.flush();
-        final long gen1 = store.readLastCommittedSegmentsInfo().getGeneration();
-        // now, optimize and wait for merges, see that we have no merge flag
-        engine.forceMerge(true);
-
-        for (Segment segment : engine.segments(false)) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
-        // we could have multiple underlying merges, so the generation may increase more than once
-        assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > gen1);
-
-        final boolean flush = randomBoolean();
-        final long gen2 = store.readLastCommittedSegmentsInfo().getGeneration();
-        engine.forceMerge(flush);
-        for (Segment segment : engine.segments(false)) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
-
-        if (flush) {
-            // we should have had just 1 merge, so last generation should be exact
-            assertEquals(gen2 + 1, store.readLastCommittedSegmentsInfo().getLastGeneration());
-        }
-
-        engine.close();
-        store.close();
     }
 
     @Test
@@ -737,7 +732,6 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
     @Test
-    @TestLogging("index.translog:TRACE")
     public void testSimpleRecover() throws Exception {
         final ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false);
         engine.create(new Engine.Create(null, newUid("1"), doc));
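
Reviewer note (illustration only, not part of the patch): the tearDown change works because IOUtils.close(Closeable...) tolerates null arguments, so cleanup no longer throws a NullPointerException when setUp aborted before creating the engines, stores or translogs, and every non-null resource is still closed even if an earlier close() throws. A minimal standalone sketch of the difference, assuming Lucene's org.apache.lucene.util.IOUtils (which skips null entries and rethrows the first exception after attempting all closes); the class and method names below are illustrative, not taken from the patch:

    import java.io.Closeable;
    import java.io.IOException;

    import org.apache.lucene.util.IOUtils;

    class TearDownSketch {
        // These may stay null if setUp failed before assigning them, as in the test class.
        private Closeable engine;
        private Closeable store;
        private Closeable translog;

        void tearDownOld() throws IOException {
            // Pre-patch style: throws NullPointerException when a field was never assigned,
            // and a failing close() skips the remaining resources.
            engine.close();
            store.close();
            translog.close();
        }

        void tearDownNew() throws IOException {
            // Post-patch style: null entries are ignored, all non-null resources are closed,
            // and the first exception (if any) is rethrown after the rest have been attempted.
            IOUtils.close(engine, store, translog);
        }
    }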