SOLR-6268: HdfsUpdateLog has a race condition that can expose a closed HDFS FileSystem instance and should close its FileSystem instance if either inherited close method is called.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1619402 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller 2014-08-21 14:16:07 +00:00
parent f979dee252
commit e681856c55
3 changed files with 32 additions and 25 deletions
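
At its core, the HdfsUpdateLog part of the fix does two things: it creates the replacement FileSystem before closing the one that other threads may still be holding, and it releases the FileSystem whenever either inherited close method runs. The following is a minimal sketch of that swap-then-close pattern; the class and method names (HdfsLogHolder, reinit) are illustrative, not Solr's actual code.

import java.io.IOException;
import java.net.URI;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Simplified sketch of the swap-and-close pattern used by this commit.
// Names are illustrative, not Solr's.
class HdfsLogHolder {
  private volatile FileSystem fs;

  void reinit(URI dataDir, Configuration conf) throws IOException {
    FileSystem oldFs = fs;
    // Open the new instance first, so readers never observe a closed fs.
    fs = FileSystem.newInstance(dataDir, conf);
    // Only then close the instance being replaced.
    if (oldFs != null) {
      oldFs.close();
    }
  }

  void close() {
    // Always release the FileSystem, even if other cleanup throws.
    IOUtils.closeQuietly(fs);
  }
}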


@@ -299,6 +299,9 @@ Bug Fixes
 * SOLR-6393: TransactionLog replay performance on HDFS is very poor. (Mark Miller)
 
+* SOLR-6268: HdfsUpdateLog has a race condition that can expose a closed HDFS FileSystem instance and should
+  close its FileSystem instance if either inherited close method is called. (Mark Miller)
+
 Optimizations
 ---------------------


@@ -274,21 +274,12 @@ public class HdfsTransactionLog extends TransactionLog {
     try {
       synchronized (this) {
         fos.flushBuffer();
-        // we must flush to hdfs
-        // TODO: we probably don't need to
-        // hsync below if we do this - I
-        // think they are equivalent.
-        tlogOutStream.hflush();
       }
 
       if (syncLevel == UpdateLog.SyncLevel.FSYNC) {
-        // Since fsync is outside of synchronized block, we can end up with a partial
-        // last record on power failure (which is OK, and does not represent an error...
-        // we just need to be aware of it when reading).
-        //raf.getFD().sync();
         tlogOutStream.hsync();
+      } else {
+        tlogOutStream.hflush();
       }
     } catch (IOException e) {
@@ -304,20 +295,25 @@ public class HdfsTransactionLog extends TransactionLog {
       }
 
       synchronized (this) {
-        fos.flush();
-        tlogOutStream.hflush();
-        fos.close();
-        tlogOutStream.close();
+        fos.flushBuffer();
       }
 
-      if (deleteOnClose) {
-        fs.delete(tlogFile, true);
-      }
+      tlogOutStream.hflush();
+      tlogOutStream.close();
     } catch (IOException e) {
+      log.error("Exception closing tlog.", e);
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+    } finally {
+      if (deleteOnClose) {
+        try {
+          fs.delete(tlogFile, true);
+        } catch (IOException e) {
+          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+        }
+      }
     }
   }
 
   public String toString() {
     return "hdfs tlog{file=" + tlogFile.toString() + " refcount=" + refcount.get() + "}";


@@ -114,16 +114,18 @@ public class HdfsUpdateLog extends UpdateLog {
       }
     }
 
+    FileSystem oldFs = fs;
+
     try {
-      if (fs != null) {
-        fs.close();
-      }
+      fs = FileSystem.newInstance(new Path(dataDir).toUri(), getConf());
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
 
     try {
-      fs = FileSystem.newInstance(new Path(dataDir).toUri(), getConf());
+      if (oldFs != null) {
+        oldFs.close();
+      }
     } catch (IOException e) {
       throw new SolrException(ErrorCode.SERVER_ERROR, e);
     }
@@ -278,8 +280,14 @@ public class HdfsUpdateLog extends UpdateLog {
   @Override
   public void close(boolean committed) {
-    synchronized (this) {
-      super.close(committed);
+    close(committed, false);
+  }
+
+  @Override
+  public void close(boolean committed, boolean deleteOnClose) {
+    try {
+      super.close(committed, deleteOnClose);
+    } finally {
       IOUtils.closeQuietly(fs);
     }
   }
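
The close() hunk above makes both inherited close variants funnel into one path, so the FileSystem is released no matter which overload a caller uses and even if the superclass close throws. A stripped-down sketch of that shape follows, with a placeholder base class standing in for UpdateLog; class and field names are hypothetical.

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;

// Placeholder base class standing in for UpdateLog's two close methods.
abstract class BaseLog {
  public void close(boolean committed) {}
  public void close(boolean committed, boolean deleteOnClose) {}
}

// Sketch: both overrides reach the same try/finally, so fs is always released.
class HdfsLogSketch extends BaseLog {
  private FileSystem fs;

  @Override
  public void close(boolean committed) {
    close(committed, false);
  }

  @Override
  public void close(boolean committed, boolean deleteOnClose) {
    try {
      super.close(committed, deleteOnClose);
    } finally {
      IOUtils.closeQuietly(fs);
    }
  }
}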