diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 195a946d8de..82e07991449 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -684,6 +684,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8175. Provide information on snapshotDiff for supporting the comparison
     between snapshot and current status (J.Andreina via vinayakumarb)
 
+    HDFS-8209. Support different number of datanode directories in MiniDFSCluster.
+    (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 8aeaef8292c..12ad23ea7c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1211,8 +1211,10 @@ public void waitClusterUp() throws IOException {
   String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes)
       throws IOException {
     StringBuilder sb = new StringBuilder();
-    assert storageTypes == null || storageTypes.length == storagesPerDatanode;
     for (int j = 0; j < storagesPerDatanode; ++j) {
+      if ((storageTypes != null) && (j >= storageTypes.length)) {
+        break;
+      }
       File dir = getInstanceStorageDir(dnIndex, j);
       dir.mkdirs();
       if (!dir.isDirectory()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index cf29d97007c..3fa852ef75a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -23,9 +23,13 @@
 import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -139,4 +143,43 @@ public void testClusterSetDatanodeHostname() throws Throwable {
       MiniDFSCluster.shutdownCluster(cluster5);
     }
   }
+
+  @Test
+  public void testClusterSetDatanodeDifferentStorageType() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    StorageType[][] storageType = new StorageType[][] {
+        {StorageType.DISK, StorageType.ARCHIVE}, {StorageType.DISK},
+        {StorageType.ARCHIVE}};
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).storageTypes(storageType).build();
+    try {
+      cluster.waitActive();
+      ArrayList<DataNode> dataNodes = cluster.getDataNodes();
+      // Check the number of directories in each DN
+      for (int i = 0; i < storageType.length; i++) {
+        assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf())
+            .size(), storageType[i].length);
+      }
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster);
+    }
+  }
+
+  @Test
+  public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      ArrayList<DataNode> dataNodes = cluster.getDataNodes();
+      // Check the number of directories in each DN
+      for (DataNode datanode : dataNodes) {
+        assertEquals(DataNode.getStorageLocations(datanode.getConf()).size(),
+            2);
+      }
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster);
+    }
+  }
 }
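
For reference, a minimal usage sketch of what this patch enables (the two-node layout below is illustrative; the classes and builder methods shown all appear in the patch itself): per-datanode storage-type arrays may now have different lengths, and makeDataNodeDirs creates only as many instance storage directories as each datanode's array specifies, instead of tripping the removed assert.

    // Illustrative layout: DN 0 gets two storage directories, DN 1 gets one.
    Configuration conf = new HdfsConfiguration();
    StorageType[][] types = new StorageType[][] {
        {StorageType.DISK, StorageType.ARCHIVE},
        {StorageType.ARCHIVE}};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).storageTypes(types).build();
    try {
      cluster.waitActive();  // DN 0 comes up with 2 storage dirs, DN 1 with 1
    } finally {
      MiniDFSCluster.shutdownCluster(cluster);
    }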