From 10b97f66554ac192f9b17045ff029de4fccc543e Mon Sep 17 00:00:00 2001
From: Tsz-Wo Nicholas Sze
Date: Mon, 15 Jun 2015 17:18:09 -0700
Subject: [PATCH] HDFS-8361. Choose SSD over DISK in block placement.

---
 .../org/apache/hadoop/fs/StorageType.java    |  7 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  2 +
 .../hadoop/hdfs/TestBlockStoragePolicy.java  | 74 +++++++++++++++++++
 3 files changed, 80 insertions(+), 3 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
index 68069d7256d..0948801a594 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
@@ -33,10 +33,11 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
-  DISK(false),
+  // sorted by the speed of the storage types, from fast to slow
+  RAM_DISK(true),
   SSD(false),
-  ARCHIVE(false),
-  RAM_DISK(true);
+  DISK(false),
+  ARCHIVE(false);
 
   private final boolean isTransient;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d59d0131927..baf98ea067f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -583,6 +583,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8521. Add VisibleForTesting annotation to
     BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+    HDFS-8361. Choose SSD over DISK in block placement. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index c0a9861ea68..0cf9124a779 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@ import java.util.*;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
@@ -1175,6 +1177,50 @@ public class TestBlockStoragePolicy {
     }
   }
 
+  @Test
+  public void testChooseSsdOverDisk() throws Exception {
+    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
+        new StorageType[]{StorageType.SSD, StorageType.DISK,
+            StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
+
+    final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
+    final String[] hosts = {"host1", "host2", "host3"};
+    final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
+
+    final DatanodeStorageInfo[] diskStorages
+        = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
+    final DatanodeDescriptor[] dataNodes
+        = DFSTestUtil.toDatanodeDescriptor(diskStorages);
+    for(int i = 0; i < dataNodes.length; i++) {
+      BlockManagerTestUtil.updateStorage(dataNodes[i],
+          new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
+              StorageType.SSD));
+    }
+
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(baseDir, "name").getPath());
+    DFSTestUtil.formatNameNode(conf);
+    NameNode namenode = new NameNode(conf);
+
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+    for (DatanodeDescriptor datanode : dataNodes) {
+      cluster.add(datanode);
+    }
+
+    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
+        new HashSet<Node>(), 0, policy);
+    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
+    Assert.assertEquals(2, targets.length);
+    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
+    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+  }
+
   @Test
   public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
     //HDFS8219
@@ -1256,4 +1302,32 @@ public class TestBlockStoragePolicy {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testStorageType() {
+    final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
+
+    //put the storage types in reversed order
+    map.put(StorageType.ARCHIVE, 1);
+    map.put(StorageType.DISK, 1);
+    map.put(StorageType.SSD, 1);
+    map.put(StorageType.RAM_DISK, 1);
+
+    {
+      final Iterator<StorageType> i = map.keySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next());
+      Assert.assertEquals(StorageType.SSD, i.next());
+      Assert.assertEquals(StorageType.DISK, i.next());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next());
+    }
+
+    {
+      final Iterator<Map.Entry<StorageType, Integer>> i
+          = map.entrySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.SSD, i.next().getKey());
+      Assert.assertEquals(StorageType.DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+    }
+  }
 }
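
Reviewer note, not part of the patch: the enum reordering has the intended effect because Java enum natural ordering, EnumSet, and EnumMap all follow declaration order, which is exactly what the new testStorageType case asserts. A minimal standalone sketch of that behavior follows; the class and enum names here are illustrative stand-ins for org.apache.hadoop.fs.StorageType, not Hadoop code.

import java.util.EnumMap;
import java.util.EnumSet;

public class StorageTypeOrderSketch {
  // Stand-in for the reordered StorageType enum, declared fastest to slowest.
  enum StorageType { RAM_DISK, SSD, DISK, ARCHIVE }

  public static void main(String[] args) {
    // EnumMap iterates its keys in declaration (ordinal) order,
    // regardless of the order in which they were inserted.
    EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
    map.put(StorageType.ARCHIVE, 1);
    map.put(StorageType.DISK, 1);
    map.put(StorageType.SSD, 1);
    map.put(StorageType.RAM_DISK, 1);
    System.out.println(map.keySet());   // [RAM_DISK, SSD, DISK, ARCHIVE]

    // EnumSet and compareTo follow the same order, so SSD sorts before DISK
    // and placement code that takes the first matching type prefers SSD.
    System.out.println(EnumSet.allOf(StorageType.class));   // [RAM_DISK, SSD, DISK, ARCHIVE]
    System.out.println(StorageType.SSD.compareTo(StorageType.DISK) < 0);   // true
  }
}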