
Test: InternalEngineTests.testSegmentsWithMergeFlag should close its translog

Also improve the test suite tearDown so it does not fail if setUp didn't create engines, translogs, etc.
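
The rewritten tearDown (see the diff below) relies on a null-tolerant, exception-collecting close helper. A minimal stand-alone sketch of that behavior, assuming Lucene's org.apache.lucene.util.IOUtils and using hypothetical local variables rather than the test's actual fields:

    import java.io.Closeable;
    import java.io.IOException;

    import org.apache.lucene.util.IOUtils;

    public class NullSafeCloseSketch {
        public static void main(String[] args) throws IOException {
            // Pretend setUp failed before creating the translog and the replica engine.
            Closeable engine = () -> System.out.println("engine closed");
            Closeable store = () -> System.out.println("store closed");
            Closeable translog = null;
            Closeable replicaEngine = null;

            // Old style: replicaEngine.close() would throw a NullPointerException,
            // masking the setUp failure and leaving engine and store unclosed.

            // New style: IOUtils.close ignores null arguments, closes every non-null
            // one, and rethrows the first close() exception only after trying them all.
            IOUtils.close(replicaEngine, translog, engine, store);
        }
    }

This is why the new tearDown no longer fails when setUp bails out before all engines, stores, and translogs have been created.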
Boaz Leskes 10 years ago
Parent
Commit
4110c1ab6c
1 changed file with 50 additions and 56 deletions
  1. src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java (+50, −56)

+50 −56
src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java

@@ -95,6 +95,7 @@ import static org.elasticsearch.test.ElasticsearchTestCase.assertBusy;
 import static org.elasticsearch.test.ElasticsearchTestCase.terminate;
 import static org.hamcrest.Matchers.*;
 
+@TestLogging("index.translog:TRACE")
 public class InternalEngineTests extends ElasticsearchLuceneTestCase {
 
     public static final String TRANSLOG_PRIMARY_LOCATION = "work/fs-translog/JVM_" + CHILD_JVM_ID + "/primary";
@@ -179,14 +180,9 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
     @After
     public void tearDown() throws Exception {
         super.tearDown();
-        replicaEngine.close();
-        storeReplica.close();
-
-        engine.close();
-        store.close();
-
-        translog.close();
-        replicaTranslog.close();
+        IOUtils.close(
+                replicaEngine, storeReplica, replicaTranslog,
+                engine, store, translog);
 
         terminate(threadPool);
     }
@@ -430,60 +426,59 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
 
     @Test
     public void testSegmentsWithMergeFlag() throws Exception {
-        final Store store = createStore();
         ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, new IndexSettingsService(shardId.index(), EMPTY_SETTINGS));
         IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
-        final Engine engine = createEngine(indexSettingsService, store, createTranslog(), mergeSchedulerProvider);
+        try (Store store = createStore();
+             Translog translog = createTranslog();
+             Engine engine = createEngine(indexSettingsService, store, translog, mergeSchedulerProvider)) {
 
-        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, false);
-        Engine.Index index = new Engine.Index(null, newUid("1"), doc);
-        engine.index(index);
-        engine.flush();
-        assertThat(engine.segments(false).size(), equalTo(1));
-        index = new Engine.Index(null, newUid("2"), doc);
-        engine.index(index);
-        engine.flush();
-        List<Segment> segments = engine.segments(false);
-        assertThat(segments.size(), equalTo(2));
-        for (Segment segment : segments) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
-        index = new Engine.Index(null, newUid("3"), doc);
-        engine.index(index);
-        engine.flush();
-        segments = engine.segments(false);
-        assertThat(segments.size(), equalTo(3));
-        for (Segment segment : segments) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
+            ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, false);
+            Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+            engine.index(index);
+            engine.flush();
+            assertThat(engine.segments(false).size(), equalTo(1));
+            index = new Engine.Index(null, newUid("2"), doc);
+            engine.index(index);
+            engine.flush();
+            List<Segment> segments = engine.segments(false);
+            assertThat(segments.size(), equalTo(2));
+            for (Segment segment : segments) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
+            index = new Engine.Index(null, newUid("3"), doc);
+            engine.index(index);
+            engine.flush();
+            segments = engine.segments(false);
+            assertThat(segments.size(), equalTo(3));
+            for (Segment segment : segments) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
 
-        index = new Engine.Index(null, newUid("4"), doc);
-        engine.index(index);
-        engine.flush();
-        final long gen1 = store.readLastCommittedSegmentsInfo().getGeneration();
-        // now, optimize and wait for merges, see that we have no merge flag
-        engine.forceMerge(true);
+            index = new Engine.Index(null, newUid("4"), doc);
+            engine.index(index);
+            engine.flush();
+            final long gen1 = store.readLastCommittedSegmentsInfo().getGeneration();
+            // now, optimize and wait for merges, see that we have no merge flag
+            engine.forceMerge(true);
 
-        for (Segment segment : engine.segments(false)) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
-        // we could have multiple underlying merges, so the generation may increase more than once
-        assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > gen1);
-
-        final boolean flush = randomBoolean();
-        final long gen2 = store.readLastCommittedSegmentsInfo().getGeneration();
-        engine.forceMerge(flush);
-        for (Segment segment : engine.segments(false)) {
-            assertThat(segment.getMergeId(), nullValue());
-        }
+            for (Segment segment : engine.segments(false)) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
+            // we could have multiple underlying merges, so the generation may increase more than once
+            assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > gen1);
+
+            final boolean flush = randomBoolean();
+            final long gen2 = store.readLastCommittedSegmentsInfo().getGeneration();
+            engine.forceMerge(flush);
+            for (Segment segment : engine.segments(false)) {
+                assertThat(segment.getMergeId(), nullValue());
+            }
 
-        if (flush) {
-            // we should have had just 1 merge, so last generation should be exact
-            assertEquals(gen2 + 1, store.readLastCommittedSegmentsInfo().getLastGeneration());
+            if (flush) {
+                // we should have had just 1 merge, so last generation should be exact
+                assertEquals(gen2 + 1, store.readLastCommittedSegmentsInfo().getLastGeneration());
+            }
         }
-
-        engine.close();
-        store.close();
     }
 
     @Test
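
For reference, a minimal stand-alone sketch (hypothetical names, not taken from the commit) of the try-with-resources behavior the rewritten test relies on: resources are closed in reverse declaration order even when the body throws, so the translog is closed even if an assertion fails mid-test.

    import java.io.Closeable;
    import java.io.IOException;
    import java.io.UncheckedIOException;

    public class TryWithResourcesSketch {
        // A tiny Closeable that just reports when it gets closed.
        static Closeable named(String name) {
            return () -> System.out.println("closed " + name);
        }

        public static void main(String[] args) {
            try (Closeable store = named("store");
                 Closeable translog = named("translog");
                 Closeable engine = named("engine")) {
                // Simulate an assertion failing in the middle of the test body.
                throw new AssertionError("boom");
            } catch (AssertionError expected) {
                // By this point the output already shows engine, translog and store
                // being closed, in that (reverse) order.
                System.out.println("caught: " + expected.getMessage());
            } catch (IOException e) {
                // Closeable.close() declares IOException; not thrown by these lambdas.
                throw new UncheckedIOException(e);
            }
        }
    }
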
@@ -737,7 +732,6 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
 
 
     @Test
-    @TestLogging("index.translog:TRACE")
     public void testSimpleRecover() throws Exception {
         final ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false);
         engine.create(new Engine.Create(null, newUid("1"), doc));