HDFS-16067. Support Append API in NNThroughputBenchmark. Contributed by Renukaprasad C.
Parent: 47002719f2
Commit: 6ed7670a93
In the benchmark documentation, the operations table gains an `append` row:

```diff
@@ -58,6 +58,7 @@ Following are all the operations supported along with their respective operation
 |`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
 |`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
+|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] |
 |`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] |
```
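For reference, an invocation of the new op from the command line might look like the following; the fully-qualified class name matches the benchmark's existing usage, and the option values are just the defaults shown in the table above:

```
hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark \
  -op append -threads 3 -files 10 -filesPerDir 4 -useExisting -appendNewBlk
```

Note that `-useExisting` presumes the files already exist, for example from a prior `-op create ... -keepResults` run, which is exactly how the new test below sets things up.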
In NNThroughputBenchmark.java, the new `AppendFileStats` operation is added:

```diff
@@ -826,6 +826,53 @@ public class NNThroughputBenchmark implements Tool {
     }
   }
 
+  /**
+   * Append file statistics.
+   * Measure how many append calls the name-node can handle per second.
+   */
+  class AppendFileStats extends OpenFileStats {
+    // Operation types
+    static final String OP_APPEND_NAME = "append";
+    public static final String APPEND_NEW_BLK = "-appendNewBlk";
+    static final String OP_APPEND_USAGE =
+        "-op " + OP_APPEND_NAME + OP_USAGE_ARGS + " [" + APPEND_NEW_BLK + ']';
+    private boolean appendNewBlk = false;
+
+    AppendFileStats(List<String> args) {
+      super(args);
+    }
+
+    @Override
+    String getOpName() {
+      return OP_APPEND_NAME;
+    }
+
+    @Override
+    void parseArguments(List<String> args) {
+      appendNewBlk = args.contains(APPEND_NEW_BLK);
+      if (this.appendNewBlk) {
+        args.remove(APPEND_NEW_BLK);
+      }
+      super.parseArguments(args);
+    }
+
+    @Override
+    long executeOp(int daemonId, int inputIdx, String ignore)
+        throws IOException {
+      long start = Time.now();
+      String src = fileNames[daemonId][inputIdx];
+      EnumSetWritable<CreateFlag> enumSet = null;
+      if (appendNewBlk) {
+        enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.NEW_BLOCK));
+      } else {
+        enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
+      }
+      clientProto.append(src, "TestClient", enumSet);
+      long end = Time.now();
+      return end - start;
+    }
+  }
+
   /**
    * List file status statistics.
    *
```
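For context on the two `CreateFlag` branches above: a plain append continues writing into the file's last (possibly partial) block, while `NEW_BLOCK` makes the appended data start a fresh block. The benchmark drives `ClientProtocol` directly; below is a minimal sketch of the equivalent client-side calls through the public `DistributedFileSystem` API, assuming a running cluster at a hypothetical URI and a hypothetical file path:

```java
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AppendModesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode URI and file path, for illustration only.
    Path file = new Path("hdfs://nn:8020/bench/file0");
    DistributedFileSystem fs =
        (DistributedFileSystem) file.getFileSystem(conf);

    // Plain append: new data continues the file's last block.
    try (FSDataOutputStream out = fs.append(file)) {
      out.write(new byte[] {1, 2, 3});
    }

    // Append that starts a new block (what -appendNewBlk exercises).
    // At this API level APPEND is passed alongside NEW_BLOCK.
    try (FSDataOutputStream out = fs.append(file,
        EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
      out.write(new byte[] {4, 5, 6});
    }
  }
}
```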
The usage string gains an entry for the new op:

```diff
@@ -1434,6 +1481,7 @@ public class NNThroughputBenchmark implements Tool {
         + " | \n\t" + MkdirsStats.OP_MKDIRS_USAGE
         + " | \n\t" + OpenFileStats.OP_OPEN_USAGE
         + " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+        + " | \n\t" + AppendFileStats.OP_APPEND_USAGE
         + " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
         + " | \n\t" + RenameFileStats.OP_RENAME_USAGE
         + " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
```
And the op is wired into operation dispatch:

```diff
@@ -1496,6 +1544,10 @@ public class NNThroughputBenchmark implements Tool {
       opStat = new DeleteFileStats(args);
       ops.add(opStat);
     }
+    if (runAll || AppendFileStats.OP_APPEND_NAME.equals(type)) {
+      opStat = new AppendFileStats(args);
+      ops.add(opStat);
+    }
     if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
       opStat = new FileStatusStats(args);
       ops.add(opStat);
```
In TestNNThroughputBenchmark.java, new imports:

```diff
@@ -26,8 +26,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.ExitUtil;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
```
and a test covering the append op:

```diff
@@ -120,4 +123,47 @@ public class TestNNThroughputBenchmark {
       }
     }
   }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * for append operation.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputForAppendOp() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "create", "-keepResults", "-files", "3",
+              "-close" });
+      FSNamesystem fsNamesystem = cluster.getNamesystem();
+      DirectoryListing listing =
+          fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      HdfsFileStatus[] partialListing = listing.getPartialListing();
+
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "append", "-files", "3", "-useExisting" });
+      listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      HdfsFileStatus[] partialListingAfter = listing.getPartialListing();
+
+      Assert.assertEquals(partialListing.length, partialListingAfter.length);
+      for (int i = 0; i < partialListing.length; i++) {
+        //Check the modification time after append operation
+        Assert.assertNotEquals(partialListing[i].getModificationTime(),
+            partialListingAfter[i].getModificationTime());
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
```
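The assertions work because a successful append updates each file's modification time on the NameNode, so comparing the listing's mtimes before and after the benchmark run confirms every file was actually appended to. To run just this test from a source checkout, a Maven invocation along these lines should work (standard Surefire single-test syntax; module path per the usual Hadoop layout):

```
mvn test -pl hadoop-hdfs-project/hadoop-hdfs -Dtest='TestNNThroughputBenchmark#testNNThroughputForAppendOp'
```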