HDFS-3181. Fix a test case in TestLeaseRecovery2.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1331138 13f79535-47bb-0310-9956-ffa450edef68
parent 1a76c82a31
commit 3e835c47d1
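In outline, the hunks below touch three spots: the HDFS change log (Release 2.0.0 - UNRELEASED section), the TestLeaseRecovery2 test, and the NameNodeAdapter test helper. hardLeaseRecoveryRestartHelper gains an explicit size parameter (-1 meaning "pick a random size", as before), a visible-length check after hflush, and a shared checkLease assertion; getLeaseHolderForPath becomes null-safe, so a closed file whose lease has already been removed yields a null holder instead of a NullPointerException. The following standalone sketch uses stand-in types rather than the real org.apache.hadoop.hdfs classes, purely to make that failure mode concrete:

    // LeaseHolderSketch.java -- hypothetical stand-in types, not HDFS code.
    public class LeaseHolderSketch {

      // Minimal stand-in for LeaseManager.Lease: it only knows its holder.
      static final class Lease {
        private final String holder;
        Lease(String holder) { this.holder = holder; }
        String getHolder() { return holder; }
      }

      // Old behaviour: calling getHolder() directly on the lookup result throws
      // NullPointerException once the file is closed and its lease is removed.
      // The patched helper instead returns null, so a test can assert
      // "no lease holder" for a closed file.
      static String holderOrNull(Lease lease) {
        return lease == null ? null : lease.getHolder();
      }

      public static void main(String[] args) {
        System.out.println(holderOrNull(new Lease("HDFS_NameNode")));  // HDFS_NameNode
        System.out.println(holderOrNull(null));                        // null
      }
    }

The checkLease helper added at the end of TestLeaseRecovery2 relies on exactly this behaviour: with size == 0 it expects a null holder (file closed), otherwise it expects HdfsServerConstants.NAMENODE_LEASE_HOLDER.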
@@ -565,6 +565,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3319. Change DFSOutputStream to not to start a thread in constructors.
     (szetszwo)
 
+    HDFS-3181. Fix a test case in TestLeaseRecovery2. (szetszwo)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -90,7 +92,7 @@ public class TestLeaseRecovery2 {
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
-    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    dfs = cluster.getFileSystem();
   }
 
   /**
@@ -406,17 +408,26 @@ public class TestLeaseRecovery2 {
    */
   @Test
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
-    hardLeaseRecoveryRestartHelper(false);
+    hardLeaseRecoveryRestartHelper(false, -1);
+  }
+
+  @Test
+  public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
+    hardLeaseRecoveryRestartHelper(false, 1535);
   }
 
   @Test
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
-    hardLeaseRecoveryRestartHelper(true);
+    hardLeaseRecoveryRestartHelper(true, -1);
   }
 
-  public void hardLeaseRecoveryRestartHelper(boolean doRename)
+  public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
       throws Exception {
+    if (size < 0) {
+      size = AppendTestUtil.nextInt(FILE_SIZE + 1);
+    }
+
     //create a file
     String fileStr = "/hardLeaseRecovery";
     AppendTestUtil.LOG.info("filestr=" + fileStr);
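A side note on the new testHardLeaseRecoveryAfterNameNodeRestart2: the patch does not say why 1535 was chosen, but one plausible reading (an assumption, not anything stated in the commit) is that it pins the flushed length just short of a checksum-chunk boundary, a case the random size used by the other two tests only occasionally hits:

    // Assumption only: HDFS checksums data in 512-byte chunks by default,
    // and 1535 == 3 * 512 - 1, i.e. one byte short of three full chunks.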
@@ -426,7 +437,6 @@ public class TestLeaseRecovery2 {
     assertTrue(dfs.dfs.exists(fileStr));
 
     // write bytes into the file.
-    int size = AppendTestUtil.nextInt(FILE_SIZE);
     AppendTestUtil.LOG.info("size=" + size);
     stm.write(buffer, 0, size);
 
@@ -440,6 +450,11 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
 
+    // check visible length
+    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
+    Assert.assertEquals(size, in.getVisibleLength());
+    in.close();
+
     if (doRename) {
       fileStr += ".renamed";
       Path renamedPath = new Path(fileStr);
@@ -463,14 +478,11 @@ public class TestLeaseRecovery2 {
     // Make sure lease recovery begins.
     Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
 
-    assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
 
     cluster.restartNameNode(false);
 
-    assertEquals("lease holder should still be the NN after restart",
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
 
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
@@ -492,12 +504,12 @@ public class TestLeaseRecovery2 {
     assertEquals(size, locatedBlocks.getFileLength());
 
     // make sure that the client can't write data anymore.
-    stm.write('b');
     try {
+      stm.write('b');
       stm.hflush();
       fail("Should not be able to flush after we've lost the lease");
     } catch (IOException e) {
-      LOG.info("Expceted exception on hflush", e);
+      LOG.info("Expceted exception on write/hflush", e);
     }
 
     try {
@@ -512,4 +524,16 @@ public class TestLeaseRecovery2 {
         "File size is good. Now validating sizes from datanodes...");
     AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
   }
+
+  static void checkLease(String f, int size) {
+    final String holder = NameNodeAdapter.getLeaseHolderForPath(
+        cluster.getNameNode(), f);
+    if (size == 0) {
+      assertEquals("lease holder should null, file is closed", null, holder);
+    } else {
+      assertEquals("lease holder should now be the NN",
+          HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
+    }
+
+  }
 }
@@ -126,7 +126,8 @@ public class NameNodeAdapter {
   }
 
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
-    return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
+    Lease l = namenode.getNamesystem().leaseManager.getLeaseByPath(path);
+    return l == null? null: l.getHolder();
   }
 
   /**