HDFS-6257. TestCacheDirectives#testExceedsCapacity fails occasionally (cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1601473 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2014-06-09 18:39:37 +00:00
parent 424fd9494f
commit 8b2b7ff0ae
2 changed files with 26 additions and 12 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -657,6 +657,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6500. Snapshot shouldn't be removed silently after renaming to an
     existing snapshot. (Nicholas SZE via junping_du)
 
+    HDFS-6257. TestCacheDirectives#testExceedsCapacity fails occasionally
+    (cmccabe)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -72,7 +72,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -1401,6 +1403,20 @@ public class TestCacheDirectives {
         .build());
   }
 
+  /**
+   * Check that the NameNode is not attempting to cache anything.
+   */
+  private void checkPendingCachedEmpty(MiniDFSCluster cluster)
+      throws Exception {
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    for (DataNode dn : cluster.getDataNodes()) {
+      DatanodeDescriptor descriptor =
+          datanodeManager.getDatanode(dn.getDatanodeId());
+      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    }
+  }
+
   @Test(timeout=60000)
   public void testExceedsCapacity() throws Exception {
     // Create a giant file
@@ -1418,21 +1434,16 @@
         .setPath(fileName).setReplication((short) 1).build());
     waitForCachedBlocks(namenode, -1, numCachedReplicas,
         "testExceeds:1");
-    // Check that no DNs saw an excess CACHE message
-    int lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
+
     // Try creating a file with giant-sized blocks that exceed cache capacity
     dfs.delete(fileName, false);
     DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
        (short) 1, 0xFADED);
-    // Nothing will get cached, so just force sleep for a bit
-    Thread.sleep(4000);
-    // Still should not see any excess commands
-    lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
   }
 }
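
Note on the fix: the patch replaces a log-scraping assertion (counting "more bytes in the cache" lines captured by a log appender) with a direct inspection of NameNode state. checkPendingCachedEmpty asserts that no DataNode has replicas queued on its pendingCached list, and the test repeats the check after a one-second sleep to catch commands the CacheReplicationMonitor might schedule late. The same condition could also be polled with Hadoop's GenericTestUtils.waitFor instead of fixed sleeps. The sketch below, which would live inside TestCacheDirectives, is only an illustration of that variant, not part of this commit; the helper name waitForPendingCachedEmpty and the polling intervals are assumptions.

// Hypothetical sleep-free variant of checkPendingCachedEmpty (illustration
// only, not part of HDFS-6257): poll the NameNode until no DataNode has
// blocks queued for caching, failing on timeout instead of racing a sleep.
import java.util.concurrent.TimeoutException;

import com.google.common.base.Supplier;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;

  private void waitForPendingCachedEmpty(final MiniDFSCluster cluster)
      throws TimeoutException, InterruptedException {
    final DatanodeManager datanodeManager =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        for (DataNode dn : cluster.getDataNodes()) {
          DatanodeDescriptor descriptor =
              datanodeManager.getDatanode(dn.getDatanodeId());
          if (!descriptor.getPendingCached().isEmpty()) {
            return false; // CACHE work still queued; keep polling
          }
        }
        return true; // no pending cache commands on any DataNode
      }
    }, 100, 10000); // poll every 100 ms, time out after 10 s (assumed values)
  }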