From 77d9c6d0f75ff5ca690d9aeb2ae6a5e27418b23c Mon Sep 17 00:00:00 2001
From: Hui Fei
Date: Wed, 28 Oct 2020 09:13:25 +0800
Subject: [PATCH] HDFS-15652. Make block size from NNThroughputBenchmark
 configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../namenode/NNThroughputBenchmark.java       | 41 +++++++++++++------
 .../namenode/TestNNThroughputBenchmark.java   |  8 +++-
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 2147129eb45..b120d7a6a35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -168,6 +168,7 @@ public class NNThroughputBenchmark implements Tool {
 
     protected final String baseDir;
     protected short replication;
+    protected int blockSize;
     protected int numThreads = 0;       // number of threads
     protected int numOpsRequired = 0;   // number of operations requested
     protected int numOpsExecuted = 0;   // number of operations executed
@@ -229,6 +230,7 @@ public class NNThroughputBenchmark implements Tool {
     OperationStatsBase() {
       baseDir = BASE_DIR_NAME + "/" + getOpName();
       replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
+      blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
       numOpsRequired = 10;
       numThreads = 3;
       logLevel = Level.ERROR;
@@ -516,7 +518,8 @@ public class NNThroughputBenchmark implements Tool {
     // Operation types
     static final String OP_CREATE_NAME = "create";
     static final String OP_CREATE_USAGE =
-        "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+        "-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]" +
+        " [-close]";
 
     protected FileNameGenerator nameGenerator;
     protected String[][] fileNames;
@@ -541,6 +544,9 @@ public class NNThroughputBenchmark implements Tool {
         if(args.get(i).equals("-files")) {
           if(i+1 == args.size()) printUsage();
           numOpsRequired = Integer.parseInt(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size()) printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(args.get(i).equals("-threads")) {
           if(i+1 == args.size()) printUsage();
           numThreads = Integer.parseInt(args.get(++i));
@@ -596,7 +602,7 @@ public class NNThroughputBenchmark implements Tool {
       clientProto.create(fileNames[daemonId][inputIdx],
           FsPermission.getDefault(), clientName,
           new EnumSetWritable<CreateFlag>(EnumSet
              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-          replication, BLOCK_SIZE, CryptoProtocolVersion.supported());
+          replication, blockSize, CryptoProtocolVersion.supported());
       long end = Time.now();
       for(boolean written = !closeUponCreate; !written; written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -716,7 +722,8 @@ public class NNThroughputBenchmark implements Tool {
 
     // Operation types
     static final String OP_OPEN_NAME = "open";
    static final String OP_USAGE_ARGS =
-        " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+        " [-threads T] [-files N] [-blockSize S] [-filesPerDir P]" +
+        " [-useExisting]";
     static final String OP_OPEN_USAGE =
         "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
@@ -748,6 +755,7 @@ public class NNThroughputBenchmark implements Tool {
               "-op", "create", "-threads",
               String.valueOf(this.numThreads),
               "-files", String.valueOf(numOpsRequired),
+              "-blockSize", String.valueOf(blockSize),
               "-filesPerDir",
               String.valueOf(nameGenerator.getFilesPerDirectory()),
               "-close"};
@@ -778,7 +786,8 @@ public class NNThroughputBenchmark implements Tool {
     long executeOp(int daemonId, int inputIdx, String ignore)
     throws IOException {
       long start = Time.now();
-      clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
+      clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+          blockSize);
       long end = Time.now();
       return end-start;
     }
@@ -1068,7 +1077,7 @@ public class NNThroughputBenchmark implements Tool {
     static final String OP_BLOCK_REPORT_NAME = "blockReport";
     static final String OP_BLOCK_REPORT_USAGE =
         "-op blockReport [-datanodes T] [-reports N] " +
-        "[-blocksPerReport B] [-blocksPerFile F]";
+        "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S]";
 
     private int blocksPerReport;
     private int blocksPerFile;
@@ -1115,6 +1124,9 @@ public class NNThroughputBenchmark implements Tool {
         } else if(args.get(i).equals("-blocksPerFile")) {
           if(i+1 == args.size()) printUsage();
           blocksPerFile = Integer.parseInt(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size()) printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(!ignoreUnrelatedOptions)
           printUsage();
       }
@@ -1145,7 +1157,7 @@ public class NNThroughputBenchmark implements Tool {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         clientProto.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
-            BLOCK_SIZE, CryptoProtocolVersion.supported());
+            blockSize, CryptoProtocolVersion.supported());
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         clientProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
       }
@@ -1256,8 +1268,9 @@ public class NNThroughputBenchmark implements Tool {
   class ReplicationStats extends OperationStatsBase {
     static final String OP_REPLICATION_NAME = "replication";
     static final String OP_REPLICATION_USAGE =
-        "-op replication [-datanodes T] [-nodesToDecommission D] " +
-        "[-nodeReplicationLimit C] [-totalBlocks B] [-replication R]";
+        "-op replication [-datanodes T] [-nodesToDecommission D] " +
+        "[-nodeReplicationLimit C] [-totalBlocks B] [-blockSize S] " +
+        "[-replication R]";
 
     private final BlockReportStats blockReportObject;
     private int numDatanodes;
@@ -1282,10 +1295,11 @@ public class NNThroughputBenchmark implements Tool {
           / (numDatanodes*numDatanodes);
 
       String[] blkReportArgs = {
-        "-op", "blockReport",
-        "-datanodes", String.valueOf(numDatanodes),
-        "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
-        "-blocksPerFile", String.valueOf(numDatanodes)};
+          "-op", "blockReport",
+          "-datanodes", String.valueOf(numDatanodes),
+          "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
+          "-blocksPerFile", String.valueOf(numDatanodes),
+          "-blockSize", String.valueOf(blockSize)};
       blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
       numDecommissionedBlocks = 0;
       numPendingBlocks = 0;
@@ -1315,6 +1329,9 @@ public class NNThroughputBenchmark implements Tool {
         } else if(args.get(i).equals("-replication")) {
           if(i+1 == args.size()) printUsage();
           replication = Short.parseShort(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size()) printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else
           if(!ignoreUnrelatedOptions) printUsage();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index 9f1ebd122c5..ec0d6df232d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -49,6 +49,7 @@ public class TestNNThroughputBenchmark {
   @Test
   public void testNNThroughput() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         nameDir.getAbsolutePath());
@@ -63,6 +64,7 @@ public class TestNNThroughputBenchmark {
   @Test(timeout = 120000)
   public void testNNThroughputWithFsOption() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         nameDir.getAbsolutePath());
@@ -84,6 +86,7 @@ public class TestNNThroughputBenchmark {
       cluster.waitActive();
 
       final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
       FileSystem.setDefaultUri(benchConf, cluster.getURI());
       NNThroughputBenchmark.runBenchmark(benchConf, new String[]{"-op", "all"});
     } finally {
@@ -101,12 +104,15 @@ public class TestNNThroughputBenchmark {
   public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
 
-      NNThroughputBenchmark.runBenchmark(new HdfsConfiguration(),
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      NNThroughputBenchmark.runBenchmark(benchConf,
           new String[]{"-fs", cluster.getURI().toString(), "-op", "all"});
     } finally {
       if (cluster != null) {
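
Note for reviewers who want to try the new option locally: after this patch
the benchmark block size comes from dfs.blocksize
(DFSConfigKeys.DFS_BLOCK_SIZE_KEY) by default, and the new -blockSize flag
overrides the configured value per run, since parseArguments runs after the
OperationStatsBase constructor. The sketch below is illustrative only: the
driver class name and the NameNode URI are placeholders, not part of this
patch, and it simply mirrors how TestNNThroughputBenchmark calls
runBenchmark. Also note that the NameNode enforces
dfs.namenode.fs-limits.min-block-size, so a tiny value such as 16 is only
accepted when the NameNode itself is configured to allow it, as the tests
above do when starting the MiniDFSCluster.

    // BlockSizeBenchDriver.java -- illustrative sketch, not part of the patch.
    // Same package as the benchmark, mirroring TestNNThroughputBenchmark.
    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class BlockSizeBenchDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Option 1: set the block size in the configuration.
        // OperationStatsBase now reads DFS_BLOCK_SIZE_KEY as its default.
        conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
        // Option 2: pass the new -blockSize flag, which overrides option 1.
        // The -fs URI below is a placeholder for a real NameNode; that
        // NameNode must allow blocks this small (see min-block-size note).
        NNThroughputBenchmark.runBenchmark(conf, new String[] {
            "-fs", "hdfs://localhost:9000",
            "-op", "create",
            "-threads", "4",
            "-files", "100",
            "-blockSize", "16",
            "-close"});
      }
    }

The same flags apply when the benchmark is launched the usual way, via the
hadoop CLI with the fully qualified class name
org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark.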