HBASE-7878 recoverFileLease does not check return value of recoverLease, revert

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1458600 13f79535-47bb-0310-9956-ffa450edef68
Zhihong Yu 2013-03-20 01:09:28 +00:00
parent 1a4f478909
commit e253150cf3
4 changed files with 21 additions and 69 deletions
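
Background, not part of the commit itself: HBASE-7878 made recoverFileLease capture the boolean returned by DistributedFileSystem#recoverLease and keep retrying until it reported success, falling back to fs.append() only after a timeout; this revert restores the earlier code, which invokes recoverLease once and ignores its result. A minimal Java sketch of the two patterns (class and method names are illustrative, and the timeout/append fallback of the real patch is simplified away):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {

  // Pattern introduced by HBASE-7878 (now reverted): loop until recoverLease()
  // reports that the lease was released and the file is closed.
  static void recoverCheckingReturn(DistributedFileSystem dfs, Path p) throws IOException {
    boolean recovered = false;
    while (!recovered) {
      recovered = dfs.recoverLease(p);   // true once recovery completed
      if (!recovered) {
        try {
          Thread.sleep(1000);            // pause before re-checking, as the patch did
        } catch (InterruptedException ie) {
          throw new IOException(ie);
        }
      }
    }
  }

  // Reverted-to pattern: call recoverLease() once and drop the return value; the
  // real method falls back to an append()/close() if the call cannot be made at all.
  static void recoverIgnoringReturn(DistributedFileSystem dfs, Path p) throws IOException {
    dfs.recoverLease(p);
  }
}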

HLogSplitter.java

@@ -26,7 +26,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -323,21 +322,10 @@ public class HLogSplitter {
       }
       status.setStatus("Log splits complete. Checking for orphaned logs.");
-      FileStatus[] currFiles = fs.listStatus(srcDir);
-      if (currFiles.length > processedLogs.size()
+      if (fs.listStatus(srcDir).length > processedLogs.size()
           + corruptedLogs.size()) {
-        Set<Path> fileSet = new HashSet<Path>();
-        for (FileStatus fstat : currFiles) {
-          fileSet.add(fstat.getPath());
-        }
-        for (Path p : processedLogs) {
-          fileSet.remove(p);
-        }
-        for (Path p : corruptedLogs) {
-          fileSet.remove(p);
-        }
         throw new OrphanHLogAfterSplitException(
-            "Discovered orphan hlog after split. " + fileSet.iterator().next() + " Maybe the "
+            "Discovered orphan hlog after split. Maybe the "
             + "HRegionServer was not dead when we started");
       }
     } finally {

FSHDFSUtils.java

@@ -55,8 +55,6 @@ public class FSHDFSUtils extends FSUtils{
    */
   public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
-  public static final String TEST_TRIGGER_DFS_APPEND = "hbase.test.trigger.dfs.append";
   @Override
   public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf)
   throws IOException{
@@ -74,37 +72,22 @@ public class FSHDFSUtils extends FSUtils{
     // Trying recovery
     boolean recovered = false;
-    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 300000);
-    // conf parameter passed from unit test, indicating whether fs.append() should be triggered
-    boolean triggerAppend = conf.getBoolean(TEST_TRIGGER_DFS_APPEND, false);
-    Exception ex = null;
     while (!recovered) {
       try {
         try {
           DistributedFileSystem dfs = (DistributedFileSystem) fs;
-          if (triggerAppend) throw new IOException();
-          try {
-            recovered = (Boolean) DistributedFileSystem.class.getMethod(
-              "recoverLease", new Class[] { Path.class }).invoke(dfs, p);
-            if (!recovered) LOG.debug("recoverLease returned false");
-          } catch (InvocationTargetException ite) {
-            // function was properly called, but threw it's own exception
-            throw (IOException) ite.getCause();
-          }
+          DistributedFileSystem.class.getMethod("recoverLease", new Class[] { Path.class }).invoke(
+            dfs, p);
+        } catch (InvocationTargetException ite) {
+          // function was properly called, but threw it's own exception
+          throw (IOException) ite.getCause();
         } catch (Exception e) {
           LOG.debug("Failed fs.recoverLease invocation, " + e.toString() +
             ", trying fs.append instead");
-          ex = e;
-        }
-        if (ex != null || System.currentTimeMillis() - startWaiting > recoveryTimeout) {
-          LOG.debug("trying fs.append for " + p + " with " + ex);
-          ex = null; // assume the following append() call would succeed
           FSDataOutputStream out = fs.append(p);
           out.close();
-          recovered = true;
-          LOG.debug("fs.append passed");
         }
-        if (recovered) break;
+        recovered = true;
       } catch (IOException e) {
         e = RemoteExceptionHandler.checkIOException(e);
         if (e instanceof AlreadyBeingCreatedException) {
@@ -128,9 +111,9 @@ public class FSHDFSUtils extends FSUtils{
       }
       try {
         Thread.sleep(1000);
-      } catch (InterruptedException ie) {
+      } catch (InterruptedException ex) {
         InterruptedIOException iioe = new InterruptedIOException();
-        iioe.initCause(ie);
+        iioe.initCause(ex);
         throw iioe;
       }
     }
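
For reference, callers reach this method through FSUtils, as the test change below shows. A minimal usage sketch; the configuration and WAL path here are placeholders, and on an HDFS-backed filesystem FSUtils.getInstance() is expected to hand back the FSHDFSUtils implementation diffed above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class LeaseRecoveryUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();          // placeholder configuration
    FileSystem fs = FileSystem.get(conf);
    Path walPath = new Path(args[0]);                  // placeholder path to a WAL file
    // After the revert this call no longer checks recoverLease()'s return value.
    FSUtils.getInstance(fs, conf).recoverFileLease(fs, walPath, conf);
  }
}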

TestHLog.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.io.SequenceFile;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -99,7 +99,6 @@ public class TestHLog {
     // Make block sizes small.
     TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
     // needed for testAppendClose()
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // quicker heartbeat interval for faster DN death notification
     TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
@@ -371,30 +370,18 @@ public class TestHLog {
     }
   }
-  /*
-   * We pass different values to recoverFileLease() so that different code paths are covered
-   *
-   * For this test to pass, requires:
-   * 1. HDFS-200 (append support)
-   * 2. HDFS-988 (SafeMode should freeze file operations
-   *    [FSNamesystem.nextGenerationStampForBlock])
-   * 3. HDFS-142 (on restart, maintain pendingCreates)
-   */
+  // For this test to pass, requires:
+  // 1. HDFS-200 (append support)
+  // 2. HDFS-988 (SafeMode should freeze file operations
+  //    [FSNamesystem.nextGenerationStampForBlock])
+  // 3. HDFS-142 (on restart, maintain pendingCreates)
   @Test
   public void testAppendClose() throws Exception {
-    testAppendClose(true);
-    testAppendClose(false);
-  }
-  /*
-   * @param triggerDirectAppend whether to trigger direct call of fs.append()
-   */
-  public void testAppendClose(final boolean triggerDirectAppend) throws Exception {
     byte [] tableName = Bytes.toBytes(getName());
     HRegionInfo regioninfo = new HRegionInfo(tableName,
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
-    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir" + triggerDirectAppend,
+    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir",
         "hlogdir_archive", conf);
     final int total = 20;
@@ -469,7 +456,6 @@ public class TestHLog {
       public Exception exception = null;
      public void run() {
          try {
-            rlConf.setBoolean(FSHDFSUtils.TEST_TRIGGER_DFS_APPEND, triggerDirectAppend);
            FSUtils.getInstance(fs, rlConf)
              .recoverFileLease(recoveredFs, walPath, rlConf);
          } catch (IOException e) {

TestHLogSplit.java

@@ -137,8 +137,6 @@ public class TestHLogSplit {
     FSUtils.setRootDir(TEST_UTIL.getConfiguration(), HBASEDIR);
     TEST_UTIL.getConfiguration().setClass("hbase.regionserver.hlog.writer.impl",
         InstrumentedSequenceFileLogWriter.class, HLog.Writer.class);
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // This is how you turn off shortcircuit read currently. TODO: Fix. Should read config.
     System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
     // Create fake maping user to group and set it to the conf.
@@ -717,17 +715,14 @@ public class TestHLogSplit {
     fs.initialize(fs.getUri(), conf);
     Thread zombie = new ZombieNewLogWriterRegionServer(stop);
-    List<Path> splits = null;
     try {
       zombie.start();
       try {
         HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
             HBASEDIR, HLOGDIR, OLDLOGDIR, fs);
-        splits = logSplitter.splitLog();
+        logSplitter.splitLog();
       } catch (IOException ex) {/* expected */}
-      FileStatus[] files = fs.listStatus(HLOGDIR);
-      if (files == null) fail("no files in " + HLOGDIR + " with splits " + splits);
-      int logFilesNumber = files.length;
+      int logFilesNumber = fs.listStatus(HLOGDIR).length;
       assertEquals("Log files should not be archived if there's an extra file after split",
           NUM_WRITERS + 1, logFilesNumber);