HBASE-7878 revert due to TestHLogSplit test failure
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1451086 13f79535-47bb-0310-9956-ffa450edef68
parent c11d251235
commit dc470e0be7

hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java

@@ -55,8 +55,6 @@ public class FSHDFSUtils extends FSUtils{
    */
   public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
 
-  public static final String TEST_TRIGGER_DFS_APPEND = "hbase.test.trigger.dfs.append";
-
   @Override
   public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf)
   throws IOException{
@@ -74,35 +72,22 @@ public class FSHDFSUtils extends FSUtils{
 
     // Trying recovery
     boolean recovered = false;
-    long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 300000);
-    // conf parameter passed from unit test, indicating whether fs.append() should be triggered
-    boolean triggerAppend = conf.getBoolean(TEST_TRIGGER_DFS_APPEND, false);
-    Exception ex = null;
     while (!recovered) {
       try {
         try {
           DistributedFileSystem dfs = (DistributedFileSystem) fs;
-          if (triggerAppend) throw new IOException();
-          try {
-            recovered = (Boolean) DistributedFileSystem.class.getMethod(
-              "recoverLease", new Class[] { Path.class }).invoke(dfs, p);
+          DistributedFileSystem.class.getMethod("recoverLease", new Class[] { Path.class }).invoke(
+            dfs, p);
         } catch (InvocationTargetException ite) {
           // function was properly called, but threw it's own exception
           throw (IOException) ite.getCause();
-          }
         } catch (Exception e) {
           LOG.debug("Failed fs.recoverLease invocation, " + e.toString() +
             ", trying fs.append instead");
-          ex = e;
-        }
-        if (ex != null || System.currentTimeMillis() - startWaiting > recoveryTimeout) {
-          ex = null; // assume the following append() call would succeed
-          LOG.debug("trying fs.append for " + p);
           FSDataOutputStream out = fs.append(p);
           out.close();
-          recovered = true;
         }
-        if (recovered) break;
+        recovered = true;
       } catch (IOException e) {
         e = RemoteExceptionHandler.checkIOException(e);
         if (e instanceof AlreadyBeingCreatedException) {
@@ -126,9 +111,9 @@ public class FSHDFSUtils extends FSUtils{
         }
         try {
           Thread.sleep(1000);
-        } catch (InterruptedException ie) {
+        } catch (InterruptedException ex) {
           InterruptedIOException iioe = new InterruptedIOException();
-          iioe.initCause(ie);
+          iioe.initCause(ex);
           throw iioe;
         }
       }
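
The FSHDFSUtils hunks move the append fallback back inside the catch block: each pass of the loop first invokes recoverLease reflectively, falls back to fs.append(p) plus close() when the reflective call is unavailable, and sleeps one second before retrying on IOException. A condensed, self-contained sketch of that restored control flow (illustrative only, not the committed source; LOG and RemoteExceptionHandler handling omitted):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.lang.reflect.InvocationTargetException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Retry until the NameNode releases the previous writer's lease on p.
      static void recoverFileLease(FileSystem fs, Path p) throws IOException {
        boolean recovered = false;
        while (!recovered) {
          try {
            try {
              DistributedFileSystem dfs = (DistributedFileSystem) fs;
              // recoverLease is called reflectively because not every
              // supported HDFS version exposes it.
              DistributedFileSystem.class.getMethod("recoverLease",
                  new Class[] { Path.class }).invoke(dfs, p);
            } catch (InvocationTargetException ite) {
              // The method ran but failed: unwrap the IOException it threw.
              throw (IOException) ite.getCause();
            } catch (Exception e) {
              // Reflection failed (method absent): a zero-length append
              // also forces lease recovery.
              FSDataOutputStream out = fs.append(p);
              out.close();
            }
            recovered = true;
          } catch (IOException e) {
            // Typically AlreadyBeingCreatedException: wait a second, retry.
            try {
              Thread.sleep(1000);
            } catch (InterruptedException ex) {
              InterruptedIOException iioe = new InterruptedIOException();
              iioe.initCause(ex);
              throw iioe;
            }
          }
        }
      }
    }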

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSHDFSUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.io.SequenceFile;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -99,7 +99,6 @@ public class TestHLog {
     // Make block sizes small.
     TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
     // needed for testAppendClose()
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     // quicker heartbeat interval for faster DN death notification
     TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
@@ -371,30 +370,18 @@ public class TestHLog {
       }
     }
 
-  /*
-   * We pass different values to recoverFileLease() so that different code paths are covered
-   *
-   * For this test to pass, requires:
-   * 1. HDFS-200 (append support)
-   * 2. HDFS-988 (SafeMode should freeze file operations
-   *    [FSNamesystem.nextGenerationStampForBlock])
-   * 3. HDFS-142 (on restart, maintain pendingCreates)
-   */
+  // For this test to pass, requires:
+  // 1. HDFS-200 (append support)
+  // 2. HDFS-988 (SafeMode should freeze file operations
+  //    [FSNamesystem.nextGenerationStampForBlock])
+  // 3. HDFS-142 (on restart, maintain pendingCreates)
   @Test
   public void testAppendClose() throws Exception {
-    testAppendClose(true);
-    testAppendClose(false);
-  }
-
-  /*
-   * @param triggerDirectAppend whether to trigger direct call of fs.append()
-   */
-  public void testAppendClose(final boolean triggerDirectAppend) throws Exception {
     byte [] tableName = Bytes.toBytes(getName());
     HRegionInfo regioninfo = new HRegionInfo(tableName,
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
 
-    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir" + triggerDirectAppend,
+    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir",
         "hlogdir_archive", conf);
     final int total = 20;
 
@@ -469,7 +456,6 @@ public class TestHLog {
       public Exception exception = null;
       public void run() {
         try {
-          rlConf.setBoolean(FSHDFSUtils.TEST_TRIGGER_DFS_APPEND, triggerDirectAppend);
          FSUtils.getInstance(fs, rlConf)
            .recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
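
In the last hunk the revert drops the TEST_TRIGGER_DFS_APPEND flag from the test's recovery thread; with it gone, recoverFileLease alone decides whether to fall back to append(). A minimal sketch of that recover-in-a-thread pattern, with the names rlConf, recoveredFs, and walPath taken from the diff context and the surrounding test class assumed (illustrative only):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.FSUtils;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      private final FileSystem fs;
      private final FileSystem recoveredFs;
      private final Path walPath;
      private final Configuration rlConf;

      RecoverLogThread(FileSystem fs, FileSystem recoveredFs,
          Path walPath, Configuration rlConf) {
        this.fs = fs;
        this.recoveredFs = recoveredFs;
        this.walPath = walPath;
        this.rlConf = rlConf;
      }

      @Override
      public void run() {
        try {
          // Recover the WAL's lease off the test thread so the test can
          // bound its wait with join(); no test-only trigger flag is set.
          FSUtils.getInstance(fs, rlConf)
              .recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e; // surfaced to the test thread after join()
        }
      }
    }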