HBASE-5243 LogSyncerThread not getting shutdown waiting for the interrupted flag (Ram)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1234559 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
ramkrishna 2012-01-22 17:43:16 +00:00
parent fbf165d347
commit 3331341429
2 changed files with 9 additions and 1 deletion

View File

@ -9,6 +9,7 @@ Release 0.92.1 - Unreleased
HBASE-5237 Addendum for HBASE-5160 and HBASE-4397 (Ram) HBASE-5237 Addendum for HBASE-5160 and HBASE-4397 (Ram)
HBASE-5235 HLogSplitter writer thread's streams not getting closed when any HBASE-5235 HLogSplitter writer thread's streams not getting closed when any
of the writer threads has exceptions. (Ram) of the writer threads has exceptions. (Ram)
HBASE-5243 LogSyncerThread not getting shutdown waiting for the interrupted flag (Ram)
TESTS TESTS
HBASE-5223 TestMetaReaderEditor is missing call to CatalogTracker.stop() HBASE-5223 TestMetaReaderEditor is missing call to CatalogTracker.stop()

View File

@ -958,6 +958,7 @@ public class HLog implements Syncable {
public void close() throws IOException { public void close() throws IOException {
try { try {
logSyncerThread.interrupt(); logSyncerThread.interrupt();
logSyncerThread.close();
// Make sure we synced everything // Make sure we synced everything
logSyncerThread.join(this.optionalFlushInterval*2); logSyncerThread.join(this.optionalFlushInterval*2);
} catch (InterruptedException e) { } catch (InterruptedException e) {
@ -1170,6 +1171,8 @@ public class HLog implements Syncable {
private final long optionalFlushInterval; private final long optionalFlushInterval;
private boolean closeLogSyncer = false;
// List of pending writes to the HLog. There corresponds to transactions // List of pending writes to the HLog. There corresponds to transactions
// that have not yet returned to the client. We keep them cached here // that have not yet returned to the client. We keep them cached here
// instead of writing them to HDFS piecemeal, because the HDFS write // instead of writing them to HDFS piecemeal, because the HDFS write
@ -1187,7 +1190,7 @@ public class HLog implements Syncable {
try { try {
// awaiting with a timeout doesn't always // awaiting with a timeout doesn't always
// throw exceptions on interrupt // throw exceptions on interrupt
while(!this.isInterrupted()) { while(!this.isInterrupted() && !closeLogSyncer) {
try { try {
if (unflushedEntries.get() <= syncedTillHere) { if (unflushedEntries.get() <= syncedTillHere) {
@ -1232,6 +1235,10 @@ public class HLog implements Syncable {
writer.append(e); writer.append(e);
} }
} }
// Signals the LogSyncer loop to exit: run() checks !closeLogSyncer each
// iteration, so setting this flag lets the thread terminate even when the
// interrupt status was consumed before the loop condition re-checked it.
// NOTE(review): closeLogSyncer is a plain boolean written here from the
// closing thread and read by the syncer thread — it should likely be
// volatile to guarantee visibility across threads; confirm against the
// field declaration and the JMM.
void close(){
closeLogSyncer = true;
}
} }
// sync all known transactions // sync all known transactions