HDFS-8209. Support different number of datanode directories in MiniDFSCluster. (Contributed by surendra singh lilhore)

Vinayakumar B 2015-05-08 15:03:44 +05:30
parent ecfa052274
commit 4c6816faf8
3 changed files with 49 additions and 1 deletion
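In brief: before this patch, MiniDFSCluster asserted that every datanode's StorageType array had exactly storagesPerDatanode entries; now a per-datanode array may be shorter, and that datanode gets one storage directory per entry. A minimal sketch of the usage this enables (the class name and standalone main are illustrative only; the builder calls mirror the new test below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RaggedStorageTypesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // One row per datanode; rows may now differ in length.
    StorageType[][] types = new StorageType[][] {
        {StorageType.DISK, StorageType.ARCHIVE}, // DN0: two storage dirs
        {StorageType.DISK}};                     // DN1: one storage dir
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).storageTypes(types).build();
    try {
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}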

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -684,6 +684,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8175. Provide information on snapshotDiff for supporting the comparison
     between snapshot and current status (J.Andreina via vinayakumarb)
 
+    HDFS-8209. Support different number of datanode directories in MiniDFSCluster.
+    (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -1211,8 +1211,10 @@ public void waitClusterUp() throws IOException {
   String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOException {
     StringBuilder sb = new StringBuilder();
-    assert storageTypes == null || storageTypes.length == storagesPerDatanode;
     for (int j = 0; j < storagesPerDatanode; ++j) {
+      if ((storageTypes != null) && (j >= storageTypes.length)) {
+        break;
+      }
       File dir = getInstanceStorageDir(dnIndex, j);
       dir.mkdirs();
       if (!dir.isDirectory()) {
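The effect of the hunk above: the removed assert required storageTypes.length == storagesPerDatanode, while the added break simply stops creating directories once the per-datanode array is exhausted. As a hypothetical distillation (dirsCreated is not a method in the patch), the number of directories a datanode ends up with is:

  // Hypothetical helper, not part of the patch: directories created
  // per datanode by the loop above.
  static int dirsCreated(int storagesPerDatanode, StorageType[] storageTypes) {
    return storageTypes == null
        ? storagesPerDatanode
        : Math.min(storagesPerDatanode, storageTypes.length);
  }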

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java

@@ -23,9 +23,13 @@
 import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
@@ -139,4 +143,43 @@ public void testClusterSetDatanodeHostname() throws Throwable {
       MiniDFSCluster.shutdownCluster(cluster5);
     }
   }
+
+  @Test
+  public void testClusterSetDatanodeDifferentStorageType() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    StorageType[][] storageType = new StorageType[][] {
+        {StorageType.DISK, StorageType.ARCHIVE}, {StorageType.DISK},
+        {StorageType.ARCHIVE}};
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).storageTypes(storageType).build();
+    try {
+      cluster.waitActive();
+      ArrayList<DataNode> dataNodes = cluster.getDataNodes();
+      // Check the number of directories in each DN
+      for (int i = 0; i < storageType.length; i++) {
+        assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf())
+            .size(), storageType[i].length);
+      }
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster);
+    }
+  }
+
+  @Test
+  public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      ArrayList<DataNode> dataNodes = cluster.getDataNodes();
+      // Check the number of directories in each DN
+      for (DataNode datanode : dataNodes) {
+        assertEquals(DataNode.getStorageLocations(datanode.getConf()).size(),
+            2);
+      }
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster);
+    }
+  }
 }
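The second test exercises the default path: with no storage types supplied, the loop in makeDataNodeDirs still runs for all storagesPerDatanode slots, which defaults to 2 in MiniDFSCluster, hence the expected count in the assert.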