LUCENE-8212: Make sure terms hash is always closed

If the stored fields writer throws an exception, we still need to close the terms hash to
close pending files. This is crucial for some tests, such as TestIndexWriterOnVMError,
that randomly failed due to this issue.
This commit is contained in:
Simon Willnauer 2018-03-19 21:56:48 +01:00
parent a832411844
commit 65559cb94d
4 changed files with 11 additions and 11 deletions

View File

@@ -17,6 +17,7 @@
package org.apache.lucene.index;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -322,11 +323,11 @@ final class DefaultIndexingChain extends DocConsumer {
}
@Override
public void abort() {
@SuppressWarnings("try")
public void abort() throws IOException{
// finalizer will e.g. close any open files in the term vectors writer:
try (Closeable finalizer = termsHash::abort){
storedFieldsConsumer.abort();
try {
// E.g. close any open files in the term vectors writer:
termsHash.abort();
} finally {
Arrays.fill(fieldHash, null);
}

View File

@@ -22,5 +22,5 @@ import java.io.IOException;
abstract class DocConsumer {
abstract void processDocument() throws IOException;
abstract Sorter.DocMap flush(final SegmentWriteState state) throws IOException;
abstract void abort();
abstract void abort() throws IOException;
}

View File

@@ -221,7 +221,7 @@ final class DocumentsWriter implements Closeable, Accountable {
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort(IndexWriter writer) {
synchronized void abort(IndexWriter writer) throws IOException {
assert !Thread.holdsLock(writer) : "IndexWriter lock should never be hold when aborting";
boolean success = false;
try {
@@ -324,7 +324,7 @@ final class DocumentsWriter implements Closeable, Accountable {
}
/** Returns how many documents were aborted. */
private int abortThreadState(final ThreadState perThread) {
private int abortThreadState(final ThreadState perThread) throws IOException {
assert perThread.isHeldByCurrentThread();
if (perThread.isInitialized()) {
try {

View File

@@ -126,8 +126,7 @@ class DocumentsWriterPerThread {
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
void abort() {
//System.out.println(Thread.currentThread().getName() + ": now abort seg=" + segmentInfo.name);
void abort() throws IOException{
aborted = true;
pendingNumDocs.addAndGet(-numDocsInRAM);
try {
@@ -513,7 +512,7 @@ class DocumentsWriterPerThread {
}
}
private void maybeAbort(String location) {
private void maybeAbort(String location) throws IOException {
if (hasHitAbortingException() && aborted == false) {
// if we are already aborted don't do anything here
try {