HDFS-8361. Choose SSD over DISK in block placement.

Tsz-Wo Nicholas Sze 2015-06-15 17:18:09 -07:00
parent 95483513ed
commit 10b97f6655
3 changed files with 80 additions and 3 deletions

View File

@@ -33,10 +33,11 @@
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
-  DISK(false),
+  // sorted by the speed of the storage types, from fast to slow
+  RAM_DISK(true),
   SSD(false),
-  ARCHIVE(false),
-  RAM_DISK(true);
+  DISK(false),
+  ARCHIVE(false);
 
   private final boolean isTransient;
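The reordering is what makes "choose SSD over DISK" possible: code that walks StorageType values through EnumSet or EnumMap (the new testStorageType below relies on this) sees the constants in declaration order, so faster media are considered before slower ones. A minimal standalone sketch, not part of this commit, that prints the new order (it assumes StorageType lives in org.apache.hadoop.fs, as in Hadoop 2.7+):

import java.util.EnumSet;

import org.apache.hadoop.fs.StorageType;

// Minimal sketch: EnumSet.allOf iterates in ordinal (declaration) order,
// so with this change it visits RAM_DISK, SSD, DISK, ARCHIVE.
public class StorageTypeOrderSketch {
  public static void main(String[] args) {
    for (StorageType t : EnumSet.allOf(StorageType.class)) {
      System.out.println(t.ordinal() + " " + t);
    }
  }
}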

View File

@@ -583,6 +583,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8521. Add VisibleForTesting annotation to
     BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+    HDFS-8361. Choose SSD over DISK in block placement. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

View File

@@ -26,6 +26,7 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,6 +41,7 @@
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetworkTopology;
@@ -1175,6 +1177,50 @@ public void testGetAllStoragePolicies() throws Exception {
}
}
  @Test
  public void testChooseSsdOverDisk() throws Exception {
    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
        new StorageType[]{StorageType.SSD, StorageType.DISK,
            StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

    final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
    final String[] hosts = {"host1", "host2", "host3"};
    final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

    final DatanodeStorageInfo[] diskStorages
        = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
    final DatanodeDescriptor[] dataNodes
        = DFSTestUtil.toDatanodeDescriptor(diskStorages);
    for(int i = 0; i < dataNodes.length; i++) {
      BlockManagerTestUtil.updateStorage(dataNodes[i],
          new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
              StorageType.SSD));
    }

    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        new File(baseDir, "name").getPath());
    DFSTestUtil.formatNameNode(conf);
    NameNode namenode = new NameNode(conf);

    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
      cluster.add(datanode);
    }

    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
        new HashSet<Node>(), 0, policy);
    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
    Assert.assertEquals(2, targets.length);
    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
  }
  @Test
  public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
    //HDFS8219
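A note on the assertions in testChooseSsdOverDisk above: the TEST1 policy requests SSD, DISK and ARCHIVE for its first three replicas, but the three datanodes in the test expose only DISK and SSD storages and the policy declares no creation fallbacks, so only two targets can be chosen, with the SSD storage first. A rough standalone sketch of the same reasoning, not code from this commit (the class and main method are illustrative only):

import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

// Rebuilds the TEST1 policy from the test to show which storage types it
// requests for a 3-replica block.
public class ChooseStorageTypesSketch {
  public static void main(String[] args) {
    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
        new StorageType[]{StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE},
        new StorageType[]{}, new StorageType[]{});
    // Expected to print [SSD, DISK, ARCHIVE]: one replica per listed type.
    List<StorageType> requested = policy.chooseStorageTypes((short) 3);
    System.out.println(requested);
    // With no ARCHIVE volumes and no creation fallbacks, the third type goes
    // unsatisfied, which is why the test asserts exactly two targets.
  }
}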
@@ -1256,4 +1302,32 @@ public void testGetAllStoragePoliciesFromFs() throws IOException {
cluster.shutdown();
}
}
  @Test
  public void testStorageType() {
    final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);

    // put the storage types in reverse order
    map.put(StorageType.ARCHIVE, 1);
    map.put(StorageType.DISK, 1);
    map.put(StorageType.SSD, 1);
    map.put(StorageType.RAM_DISK, 1);

    {
      final Iterator<StorageType> i = map.keySet().iterator();
      Assert.assertEquals(StorageType.RAM_DISK, i.next());
      Assert.assertEquals(StorageType.SSD, i.next());
      Assert.assertEquals(StorageType.DISK, i.next());
      Assert.assertEquals(StorageType.ARCHIVE, i.next());
    }

    {
      final Iterator<Map.Entry<StorageType, Integer>> i
          = map.entrySet().iterator();
      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
      Assert.assertEquals(StorageType.SSD, i.next().getKey());
      Assert.assertEquals(StorageType.DISK, i.next().getKey());
      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
    }
  }
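testStorageType pins down the keySet and entrySet iteration order because code that tallies storages in a map keyed by StorageType will, thanks to EnumMap's ordinal ordering, consider RAM_DISK and SSD before DISK and ARCHIVE. A small illustrative sketch of that idea; chooseFastest below is a hypothetical helper, not an HDFS API:

import java.util.EnumMap;
import java.util.Map;

import org.apache.hadoop.fs.StorageType;

// Hypothetical helper: returns the first (fastest) storage type that still
// has capacity, relying on EnumMap's declaration-order iteration.
public class FastestTypeSketch {
  static StorageType chooseFastest(EnumMap<StorageType, Integer> available) {
    for (Map.Entry<StorageType, Integer> e : available.entrySet()) {
      if (e.getValue() > 0) {
        return e.getKey();
      }
    }
    return null; // nothing available
  }

  public static void main(String[] args) {
    final EnumMap<StorageType, Integer> available = new EnumMap<>(StorageType.class);
    available.put(StorageType.DISK, 3);
    available.put(StorageType.SSD, 1);
    // Prints SSD: with the reordered enum, SSD is visited before DISK.
    System.out.println(chooseFastest(available));
  }
}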
}