HBASE-3412 HLogSplitter should handle missing HLogs

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1055563 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jean-Daniel Cryans 2011-01-05 18:05:59 +00:00
parent b7ece04e0f
commit f397fa6309
4 changed files with 45 additions and 1 deletion

View File

@ -815,6 +815,7 @@ Release 0.90.0 - Unreleased
HBASE-3408 AssignmentManager NullPointerException
HBASE-3402 Web UI shows two META regions
HBASE-3409 Failed server shutdown processing when retrying hlog split
HBASE-3412 HLogSplitter should handle missing HLogs
IMPROVEMENTS

View File

@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver.wal;
import static org.apache.hadoop.hbase.util.FSUtils.recoverFileLease;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
@ -262,7 +263,14 @@ public class HLogSplitter {
processedLogs.add(logPath);
} catch (EOFException eof) {
// truncated files are expected if a RS crashes (see HBASE-2643)
LOG.info("EOF from hlog " + logPath + ". continuing");
LOG.info("EOF from hlog " + logPath + ". Continuing");
processedLogs.add(logPath);
} catch (FileNotFoundException fnfe) {
// A file may be missing if the region server was able to archive it
// before shutting down. This means the edits were persisted already
LOG.info("A log was missing " + logPath +
", probably because it was moved by the" +
" now dead region server. Continuing");
processedLogs.add(logPath);
} catch (IOException e) {
// If the IOE resulted from bad file format,

View File

@ -36,10 +36,12 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.SequenceFile;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@ -649,6 +651,11 @@ public class FSUtils {
} catch (InterruptedException ex) {
// ignore it and try again
}
} else if (e instanceof LeaseExpiredException &&
e.getMessage().contains("File does not exist")) {
// This exception comes out instead of FNFE, fix it
throw new FileNotFoundException(
"The given HLog wasn't found at " + p.toString());
} else {
throw new IOException("Failed to open " + p + " for append", e);
}

View File

@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.AfterClass;
@ -672,6 +673,33 @@ public class TestHLogSplit {
assertTrue(ioe.toString().contains("Injected"));
}
}
// Test for HBASE-3412
@Test
public void testMovedHLogDuringRecovery() throws Exception {
generateHLogs(-1);
fs.initialize(fs.getUri(), conf);
// This partial mock will throw LEE for every file simulating
// files that were moved
FileSystem spiedFs = Mockito.spy(fs);
// The "File does not exist" part is very important,
// that's how it comes out of HDFS
Mockito.doThrow(new LeaseExpiredException("Injected: File does not exist")).
when(spiedFs).append(Mockito.<Path>any());
HLogSplitter logSplitter = new HLogSplitter(
conf, hbaseDir, hlogDir, oldLogDir, spiedFs);
try {
logSplitter.splitLog();
assertEquals(NUM_WRITERS, fs.listStatus(oldLogDir).length);
assertFalse(fs.exists(hlogDir));
} catch (IOException e) {
fail("There shouldn't be any exception but: " + e.toString());
}
}
/**
* Test log split process with fake data and lots of edits to trigger threading