HDFS-6257. TestCacheDirectives#testExceedsCapacity fails occasionally (cmccabe)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1601474 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 319e422733
commit 7b795f6a7c

@@ -336,6 +336,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6500. Snapshot shouldn't be removed silently after renaming to an
     existing snapshot. (Nicholas SZE via junping_du)
 
+    HDFS-6257. TestCacheDirectives#testExceedsCapacity fails occasionally
+    (cmccabe)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -72,7 +72,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -1401,6 +1403,20 @@ public class TestCacheDirectives {
         .build());
   }
 
+  /**
+   * Check that the NameNode is not attempting to cache anything.
+   */
+  private void checkPendingCachedEmpty(MiniDFSCluster cluster)
+      throws Exception {
+    final DatanodeManager datanodeManager =
+        cluster.getNamesystem().getBlockManager().getDatanodeManager();
+    for (DataNode dn : cluster.getDataNodes()) {
+      DatanodeDescriptor descriptor =
+          datanodeManager.getDatanode(dn.getDatanodeId());
+      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    }
+  }
+
   @Test(timeout=60000)
   public void testExceedsCapacity() throws Exception {
     // Create a giant file
@@ -1418,21 +1434,16 @@ public class TestCacheDirectives {
         .setPath(fileName).setReplication((short) 1).build());
     waitForCachedBlocks(namenode, -1, numCachedReplicas,
         "testExceeds:1");
-    // Check that no DNs saw an excess CACHE message
-    int lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
+
     // Try creating a file with giant-sized blocks that exceed cache capacity
     dfs.delete(fileName, false);
     DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
         (short) 1, 0xFADED);
-    // Nothing will get cached, so just force sleep for a bit
-    Thread.sleep(4000);
-    // Still should not see any excess commands
-    lines = appender.countLinesWithMessage(
-        "more bytes in the cache: " +
-        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
-    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    checkPendingCachedEmpty(cluster);
+    Thread.sleep(1000);
+    checkPendingCachedEmpty(cluster);
   }
 }
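
Note: as the diff shows, the test no longer counts "more bytes in the cache" log lines after long fixed sleeps; it instead asserts directly that no DataNode has anything on its pendingCached list, once immediately and once after a short sleep. A minimal, self-contained sketch of that check/sleep/re-check pattern follows. The CachingNode interface and the helper names are hypothetical illustrations for this note only, not Hadoop APIs; the real test reads the state through DatanodeManager#getDatanode(...).getPendingCached().

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Hypothetical stand-in for the per-DataNode view the real test inspects.
interface CachingNode {
  String name();
  List<Long> pendingCached();   // block IDs the NameNode has scheduled for caching
}

public class PendingCacheCheckSketch {

  // Mirrors the test's sequence: assert now, wait a moment so the cache
  // monitor could (incorrectly) schedule work, then assert once more.
  static void assertNoPendingCaching(List<CachingNode> nodes)
      throws InterruptedException {
    checkEmpty(nodes);
    Thread.sleep(1000);
    checkEmpty(nodes);
  }

  private static void checkEmpty(List<CachingNode> nodes) {
    for (CachingNode node : nodes) {
      if (!node.pendingCached().isEmpty()) {
        throw new AssertionError("NameNode should not schedule caching on "
            + node.name() + ": " + node.pendingCached());
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Two fake DataNodes with nothing pending: the assertion passes.
    CachingNode a = node("dn-a", Collections.<Long>emptyList());
    CachingNode b = node("dn-b", Collections.<Long>emptyList());
    assertNoPendingCaching(Arrays.asList(a, b));
    System.out.println("no pending cache work, as expected");
  }

  private static CachingNode node(final String name, final List<Long> pending) {
    return new CachingNode() {
      public String name() { return name; }
      public List<Long> pendingCached() { return pending; }
    };
  }
}

Checking NameNode-side state directly avoids the timing dependence of scraping log output, which is presumably why the fixed 4-second sleep and the log-appender counting were dropped.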