diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 245f5be39c5..513c6094c57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -169,6 +169,7 @@ public class NNThroughputBenchmark implements Tool {
 
     protected final String baseDir;
     protected short replication;
+    protected int blockSize;
    protected int numThreads = 0;       // number of threads
    protected int numOpsRequired = 0;   // number of operations requested
    protected int numOpsExecuted = 0;   // number of operations executed
@@ -230,6 +231,7 @@ public class NNThroughputBenchmark implements Tool {
     OperationStatsBase() {
       baseDir = BASE_DIR_NAME + "/" + getOpName();
       replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
+      blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
       numOpsRequired = 10;
       numThreads = 3;
       logLevel = Level.ERROR;
@@ -517,7 +519,8 @@ public class NNThroughputBenchmark implements Tool {
     // Operation types
     static final String OP_CREATE_NAME = "create";
     static final String OP_CREATE_USAGE =
-        "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+        "-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]" +
+        " [-close]";
 
     protected FileNameGenerator nameGenerator;
     protected String[][] fileNames;
@@ -542,6 +545,9 @@ public class NNThroughputBenchmark implements Tool {
         if(args.get(i).equals("-files")) {
           if(i+1 == args.size())  printUsage();
           numOpsRequired = Integer.parseInt(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size())  printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(args.get(i).equals("-threads")) {
           if(i+1 == args.size())  printUsage();
           numThreads = Integer.parseInt(args.get(++i));
@@ -598,7 +604,7 @@ public class NNThroughputBenchmark implements Tool {
           FsPermission.getDefault(), clientName,
           new EnumSetWritable<CreateFlag>(EnumSet
               .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-          replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null,
+          replication, blockSize, CryptoProtocolVersion.supported(), null,
           null);
       long end = Time.now();
       for (boolean written = !closeUponCreate; !written;
@@ -720,7 +726,8 @@ public class NNThroughputBenchmark implements Tool {
     // Operation types
     static final String OP_OPEN_NAME = "open";
     static final String OP_USAGE_ARGS =
-        " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+        " [-threads T] [-files N] [-blockSize S] [-filesPerDir P]" +
+        " [-useExisting]";
     static final String OP_OPEN_USAGE =
         "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -752,6 +759,7 @@ public class NNThroughputBenchmark implements Tool {
           "-op", "create",
           "-threads", String.valueOf(this.numThreads),
           "-files", String.valueOf(numOpsRequired),
+          "-blockSize", String.valueOf(blockSize),
           "-filesPerDir",
           String.valueOf(nameGenerator.getFilesPerDirectory()),
           "-close"};
@@ -782,7 +790,8 @@ public class NNThroughputBenchmark implements Tool {
     long executeOp(int daemonId, int inputIdx, String ignore)
     throws IOException {
       long start = Time.now();
-      clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
+      clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+          blockSize);
       long end = Time.now();
       return end-start;
     }
@@ -1072,7 +1081,7 @@ public class NNThroughputBenchmark implements Tool {
     static final String OP_BLOCK_REPORT_NAME = "blockReport";
     static final String OP_BLOCK_REPORT_USAGE =
         "-op blockReport [-datanodes T] [-reports N] " +
-        "[-blocksPerReport B] [-blocksPerFile F]";
+        "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S]";
 
     private int blocksPerReport;
     private int blocksPerFile;
@@ -1119,6 +1128,9 @@ public class NNThroughputBenchmark implements Tool {
         } else if(args.get(i).equals("-blocksPerFile")) {
           if(i+1 == args.size())  printUsage();
           blocksPerFile = Integer.parseInt(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size())  printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(!ignoreUnrelatedOptions)
           printUsage();
       }
@@ -1149,7 +1161,7 @@ public class NNThroughputBenchmark implements Tool {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         clientProto.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
-            BLOCK_SIZE, CryptoProtocolVersion.supported(), null, null);
+            blockSize, CryptoProtocolVersion.supported(), null, null);
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         clientProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
       }
@@ -1260,8 +1272,9 @@ public class NNThroughputBenchmark implements Tool {
   class ReplicationStats extends OperationStatsBase {
     static final String OP_REPLICATION_NAME = "replication";
     static final String OP_REPLICATION_USAGE =
-        "-op replication [-datanodes T] [-nodesToDecommission D] " +
-        "[-nodeReplicationLimit C] [-totalBlocks B] [-replication R]";
+        "-op replication [-datanodes T] [-nodesToDecommission D] " +
+        "[-nodeReplicationLimit C] [-totalBlocks B] [-blockSize S] " +
+        "[-replication R]";
 
     private final BlockReportStats blockReportObject;
     private int numDatanodes;
@@ -1286,10 +1299,11 @@ public class NNThroughputBenchmark implements Tool {
           / (numDatanodes*numDatanodes);
 
       String[] blkReportArgs = {
-        "-op", "blockReport",
-        "-datanodes", String.valueOf(numDatanodes),
-        "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
-        "-blocksPerFile", String.valueOf(numDatanodes)};
+          "-op", "blockReport",
+          "-datanodes", String.valueOf(numDatanodes),
+          "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
+          "-blocksPerFile", String.valueOf(numDatanodes),
+          "-blockSize", String.valueOf(blockSize)};
       blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
       numDecommissionedBlocks = 0;
       numPendingBlocks = 0;
@@ -1319,6 +1333,9 @@ public class NNThroughputBenchmark implements Tool {
         } else if(args.get(i).equals("-replication")) {
           if(i+1 == args.size())  printUsage();
           replication = Short.parseShort(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size())  printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(!ignoreUnrelatedOptions)
           printUsage();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index 9f1ebd122c5..ec0d6df232d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -49,6 +49,7 @@ public class TestNNThroughputBenchmark {
   @Test
   public void testNNThroughput() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         nameDir.getAbsolutePath());
@@ -63,6 +64,7 @@ public class TestNNThroughputBenchmark {
   @Test(timeout = 120000)
   public void testNNThroughputWithFsOption() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         nameDir.getAbsolutePath());
@@ -84,6 +86,7 @@ public class TestNNThroughputBenchmark {
       cluster.waitActive();
 
       final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
       FileSystem.setDefaultUri(benchConf, cluster.getURI());
       NNThroughputBenchmark.runBenchmark(benchConf, new String[]{"-op", "all"});
     } finally {
@@ -101,12 +104,15 @@ public class TestNNThroughputBenchmark {
   public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
 
-      NNThroughputBenchmark.runBenchmark(new HdfsConfiguration(),
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      NNThroughputBenchmark.runBenchmark(benchConf,
           new String[]{"-fs", cluster.getURI().toString(), "-op", "all"});
     } finally {
       if (cluster != null) {
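
A minimal sketch of how the new -blockSize option could be driven once this patch is applied. It reuses the same static runBenchmark(Configuration, String[]) entry point that the tests above call; since NNThroughputBenchmark lives in HDFS test sources, this assumes the hadoop-hdfs test jar is on the classpath, and the thread count, file count, and 1 MB block size below are illustrative values, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

public class BlockSizeBenchmarkExample {
  public static void main(String[] args) throws Exception {
    // Without -blockSize, the benchmark now falls back to dfs.blocksize
    // (DFS_BLOCK_SIZE_KEY) per the new OperationStatsBase constructor;
    // the flag overrides it per run, as CreateFileStats.parseArguments shows.
    Configuration conf = new HdfsConfiguration();
    NNThroughputBenchmark.runBenchmark(conf, new String[]{
        "-op", "create",
        "-threads", "3",          // illustrative value
        "-files", "100",          // illustrative value
        "-blockSize", "1048576",  // new option added by this patch
        "-close"});
  }
}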