From 65559cb94d2cbbc9081f6f5d6d8f6bac055b11e6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 19 Mar 2018 21:56:48 +0100 Subject: [PATCH] LUCENE-8212: Make sure terms hash is always closed If the stored fields writer barfs, we still need to close the terms hash to close pending files. This is crucial for some tests like TestIndexWriterOnVMError that randomly failed due to this. --- .../org/apache/lucene/index/DefaultIndexingChain.java | 11 ++++++----- .../src/java/org/apache/lucene/index/DocConsumer.java | 2 +- .../java/org/apache/lucene/index/DocumentsWriter.java | 4 ++-- .../apache/lucene/index/DocumentsWriterPerThread.java | 5 ++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java index c9297970972..4541f4a92db 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java +++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java @@ -17,6 +17,7 @@ package org.apache.lucene.index; +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -322,11 +323,11 @@ final class DefaultIndexingChain extends DocConsumer { } @Override - public void abort() { - storedFieldsConsumer.abort(); - try { - // E.g. close any open files in the term vectors writer: - termsHash.abort(); + @SuppressWarnings("try") + public void abort() throws IOException{ + // finalizer will e.g. 
close any open files in the term vectors writer: + try (Closeable finalizer = termsHash::abort){ + storedFieldsConsumer.abort(); } finally { Arrays.fill(fieldHash, null); } diff --git a/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java b/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java index 8ff57049b98..a64f13c5ba2 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java +++ b/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java @@ -22,5 +22,5 @@ import java.io.IOException; abstract class DocConsumer { abstract void processDocument() throws IOException; abstract Sorter.DocMap flush(final SegmentWriteState state) throws IOException; - abstract void abort(); + abstract void abort() throws IOException; } diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java index 616915bcac6..f848b2afa90 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java +++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java @@ -221,7 +221,7 @@ final class DocumentsWriter implements Closeable, Accountable { * updating the index files) and must discard all * currently buffered docs. This resets our state, * discarding any docs added since last flush. */ - synchronized void abort(IndexWriter writer) { + synchronized void abort(IndexWriter writer) throws IOException { assert !Thread.holdsLock(writer) : "IndexWriter lock should never be hold when aborting"; boolean success = false; try { @@ -324,7 +324,7 @@ final class DocumentsWriter implements Closeable, Accountable { } /** Returns how many documents were aborted. 
*/ - private int abortThreadState(final ThreadState perThread) { + private int abortThreadState(final ThreadState perThread) throws IOException { assert perThread.isHeldByCurrentThread(); if (perThread.isInitialized()) { try { diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java index d5ebc600723..32a783a7ff1 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java +++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java @@ -126,8 +126,7 @@ class DocumentsWriterPerThread { * updating the index files) and must discard all * currently buffered docs. This resets our state, * discarding any docs added since last flush. */ - void abort() { - //System.out.println(Thread.currentThread().getName() + ": now abort seg=" + segmentInfo.name); + void abort() throws IOException{ aborted = true; pendingNumDocs.addAndGet(-numDocsInRAM); try { @@ -513,7 +512,7 @@ class DocumentsWriterPerThread { } } - private void maybeAbort(String location) { + private void maybeAbort(String location) throws IOException { if (hasHitAbortingException() && aborted == false) { // if we are already aborted don't do anything here try {