HDFS-9379. Make NNThroughputBenchmark support more than 10 datanodes. (Contributed by Mingliang Liu)

Committed by Arpit Agarwal on 2015-11-06 18:58:49 -08:00
parent 481e7248de
commit 1e0746e756
2 changed files with 4 additions and 5 deletions

CHANGES.txt

@@ -785,6 +785,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9236. Missing sanity check for block size during block recovery.
     (Tony Wu via Yongjun Zhang)
 
+    HDFS-9379. Make NNThroughputBenchmark$BlockReportStats support more than 10
+    datanodes. (Mingliang Liu via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

NNThroughputBenchmark.java

@@ -1145,14 +1145,10 @@ public class NNThroughputBenchmark implements Tool {
       int nrFiles = (int)Math.ceil((double)nrBlocks / blocksPerFile);
       datanodes = new TinyDatanode[nrDatanodes];
       // create data-nodes
-      String prevDNName = "";
       for(int idx=0; idx < nrDatanodes; idx++) {
         datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
         datanodes[idx].register();
-        assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
-            : "Data-nodes must be sorted lexicographically.";
         datanodes[idx].sendHeartbeat();
-        prevDNName = datanodes[idx].getXferAddr();
       }
       // create files
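
The deleted assertion required each simulated datanode's transfer address to sort after the previous one. A minimal standalone sketch (hypothetical class name, not part of the patch) of why that assumption stops holding once transfer ports need more than one digit:

// Hypothetical illustration only: string order of "host:port" diverges from
// numeric port order once ports go beyond a single digit.
public class LexicographicOrderSketch {
  public static void main(String[] args) {
    String prevDNName = "127.0.0.1:9";   // earlier simulated datanode's xfer address
    String nextDNName = "127.0.0.1:10";  // next simulated datanode's xfer address
    // Numerically port 10 > 9, but as strings ":10" sorts before ":9",
    // so the removed assert (each address > the previous one) would fire here.
    System.out.println(nextDNName.compareTo(prevDNName) > 0);  // prints false
  }
}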
@@ -1184,7 +1180,7 @@ public class NNThroughputBenchmark implements Tool {
             prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
-          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
+          int dnIdx = dnInfo.getXferPort() - 1;
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
               loc.getBlock().getLocalBlock(),
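
With the sort-order assumption gone, the lookup no longer uses Arrays.binarySearch over transfer addresses; the new line derives the array index from the transfer port, which implies TinyDatanode idx registers with port idx + 1. A rough standalone sketch (hypothetical names, plain strings standing in for the benchmark's TinyDatanode objects) contrasting the two strategies:

import java.util.Arrays;

// Hypothetical sketch of the old and new datanode lookup.
public class DatanodeLookupSketch {
  public static void main(String[] args) {
    int nrDatanodes = 12;  // more than 10, the case this change targets
    String[] xferAddrs = new String[nrDatanodes];
    for (int idx = 0; idx < nrDatanodes; idx++) {
      xferAddrs[idx] = "127.0.0.1:" + (idx + 1);  // datanode idx uses port idx + 1
    }

    // Old approach: binary search by transfer address. The array is ordered by
    // port number, not lexicographically, so the search contract is violated
    // and the result may be wrong or negative.
    System.out.println(Arrays.binarySearch(xferAddrs, "127.0.0.1:11"));

    // New approach: derive the index directly from the transfer port; correct
    // for any number of simulated datanodes.
    int xferPort = 11;
    System.out.println(xferPort - 1);  // prints 10
  }
}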