HDFS-7127. TestLeaseRecovery leaks MiniDFSCluster instances. Contributed by Chris Nauroth.

cnauroth 2014-09-25 11:24:31 -07:00
parent dc2ebf88a8
commit b607c3c363
2 changed files with 73 additions and 70 deletions
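
The change itself is a cleanup-pattern refactoring: the MiniDFSCluster becomes a field of the test class and is shut down in a JUnit @After method, which runs whether a test passes, fails, or throws. Previously each test was responsible for its own try/finally shutdown, so a test that failed before reaching its finally block (or skipped cleanup entirely) leaked a running cluster into later tests. A condensed sketch of the pattern as the patch applies it, with the real test bodies elided:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Test;

public class TestLeaseRecovery {
  // One cluster reference shared by all tests in the class.
  private MiniDFSCluster cluster;

  // JUnit runs this after every @Test, regardless of outcome,
  // so a failing test can no longer leak its cluster.
  @After
  public void shutdown() throws IOException {
    if (cluster != null) {  // the test may have failed before building one
      cluster.shutdown();
    }
  }

  @Test
  public void testBlockSynchronization() throws Exception {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
    cluster.waitActive();
    // ... assertions against the running cluster; no try/finally needed ...
  }
}

The same field assignment replaces the local cluster variable in testBlockRecoveryWithLessMetafile, so both tests share the one teardown path.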

CHANGES.txt

@@ -953,6 +953,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7111. TestSafeMode assumes Unix line endings in safe mode tip.
     (cnauroth)
 
+    HDFS-7127. TestLeaseRecovery leaks MiniDFSCluster instances. (cnauroth)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

TestLeaseRecovery.java

@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
 import org.junit.Test;
 
 public class TestLeaseRecovery {
@@ -48,6 +49,15 @@ public class TestLeaseRecovery {
   static final short REPLICATION_NUM = (short)3;
   private static final long LEASE_PERIOD = 300L;
 
+  private MiniDFSCluster cluster;
+
+  @After
+  public void shutdown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   static void checkMetaInfo(ExtendedBlock b, DataNode dn
       ) throws IOException {
     TestInterDatanodeProtocol.checkMetaInfo(b, dn);
@@ -82,79 +92,71 @@ public void testBlockSynchronization() throws Exception {
     final int ORG_FILE_SIZE = 3000;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    MiniDFSCluster cluster = null;
-
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
-      cluster.waitActive();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+    cluster.waitActive();
 
     //create a file
     DistributedFileSystem dfs = cluster.getFileSystem();
     String filestr = "/foo";
     Path filepath = new Path(filestr);
     DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
     assertTrue(dfs.exists(filepath));
     DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
 
     //get block info for the last block
     LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
         dfs.dfs.getNamenode(), filestr);
     DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
     assertEquals(REPLICATION_NUM, datanodeinfos.length);
 
     //connect to data nodes
     DataNode[] datanodes = new DataNode[REPLICATION_NUM];
     for(int i = 0; i < REPLICATION_NUM; i++) {
       datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
       assertTrue(datanodes[i] != null);
     }
 
     //verify Block Info
     ExtendedBlock lastblock = locatedblock.getBlock();
     DataNode.LOG.info("newblocks=" + lastblock);
     for(int i = 0; i < REPLICATION_NUM; i++) {
       checkMetaInfo(lastblock, datanodes[i]);
     }
 
     DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
     cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
 
     // expire lease to trigger block recovery.
     waitLeaseRecovery(cluster);
 
     Block[] updatedmetainfo = new Block[REPLICATION_NUM];
     long oldSize = lastblock.getNumBytes();
     lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
         dfs.dfs.getNamenode(), filestr).getBlock();
     long currentGS = lastblock.getGenerationStamp();
     for(int i = 0; i < REPLICATION_NUM; i++) {
       updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
           lastblock.getBlockPoolId(), lastblock.getBlockId());
       assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
       assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
       assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
     }
 
     // verify that lease recovery does not occur when namenode is in safemode
     System.out.println("Testing that lease recovery cannot happen during safemode.");
     filestr = "/foo.safemode";
     filepath = new Path(filestr);
     dfs.create(filepath, (short)1);
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
     assertTrue(dfs.dfs.exists(filestr));
     DFSTestUtil.waitReplication(dfs, filepath, (short)1);
     waitLeaseRecovery(cluster);
     // verify that we still cannot recover the lease
     LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
     assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
-    }
-    finally {
-      if (cluster != null) {cluster.shutdown();}
-    }
   }
 
   /**
@@ -166,8 +168,7 @@ public void testBlockRecoveryWithLessMetafile() throws Exception {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     Path file = new Path("/testRecoveryFile");
     DistributedFileSystem dfs = cluster.getFileSystem();
     FSDataOutputStream out = dfs.create(file);