HDFS-8361. Choose SSD over DISK in block placement.
parent 1b6695a4c0
commit 175e6d120f
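Summary: the StorageType constants are reordered fastest-first (RAM_DISK, SSD, DISK, ARCHIVE), so code that walks storage types in declaration order now considers SSD before DISK during block placement.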
StorageType.java
@@ -33,10 +33,11 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
-  DISK(false),
+  // sorted by the speed of the storage types, from fast to slow
+  RAM_DISK(true),
   SSD(false),
-  ARCHIVE(false),
-  RAM_DISK(true);
+  DISK(false),
+  ARCHIVE(false);
 
   private final boolean isTransient;
 
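The reorder matters because java.util.EnumMap and EnumSet iterate in the enum's declaration order, which is exactly what the new testStorageType below asserts. A minimal standalone sketch of that guarantee (toy enum for illustration, not the Hadoop class):

    import java.util.EnumMap;

    public class StorageTypeOrderDemo {
      // toy copy of the reordered enum, fastest first
      enum StorageType { RAM_DISK, SSD, DISK, ARCHIVE }

      public static void main(String[] args) {
        final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
        map.put(StorageType.ARCHIVE, 1);   // inserted slowest-first on purpose
        map.put(StorageType.SSD, 1);
        map.put(StorageType.RAM_DISK, 1);
        // EnumMap ignores insertion order and iterates in declaration order
        System.out.println(map.keySet()); // prints [RAM_DISK, SSD, ARCHIVE]
      }
    }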
CHANGES.txt
@@ -920,6 +920,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8521. Add VisibleForTesting annotation to
     BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+    HDFS-8361. Choose SSD over DISK in block placement. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@ import java.util.*;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,7 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -1152,6 +1153,50 @@ public class TestBlockStoragePolicy {
     Assert.assertEquals(3, targets.length);
   }
 
+  @Test
+  public void testChooseSsdOverDisk() throws Exception {
+    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
+        new StorageType[]{StorageType.SSD, StorageType.DISK,
+            StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
+
+    final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
+    final String[] hosts = {"host1", "host2", "host3"};
+    final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
+
+    final DatanodeStorageInfo[] diskStorages
+        = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
+    final DatanodeDescriptor[] dataNodes
+        = DFSTestUtil.toDatanodeDescriptor(diskStorages);
+    for(int i = 0; i < dataNodes.length; i++) {
+      BlockManagerTestUtil.updateStorage(dataNodes[i],
+          new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
+              StorageType.SSD));
+    }
+
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(baseDir, "name").getPath());
+    DFSTestUtil.formatNameNode(conf);
+    NameNode namenode = new NameNode(conf);
+
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+    for (DatanodeDescriptor datanode : dataNodes) {
+      cluster.add(datanode);
+    }
+
+    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
+        new HashSet<Node>(), 0, policy);
+    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
+    Assert.assertEquals(2, targets.length);
+    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
+    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+  }
+
   @Test
   public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
     //HDFS8219
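Note the test requests three replicas but asserts only two targets: the policy lists {SSD, DISK, ARCHIVE}, and no datanode in the test registers ARCHIVE storage, so the third replica cannot be placed. A sketch of the replica-to-storage-type mapping this relies on (the real logic lives in BlockStoragePolicy; the helper below is illustrative, not the Hadoop API):

    // Illustrative helper, not part of Hadoop: maps the i-th replica to the
    // i-th listed storage type, falling back to the last entry for extras.
    static StorageType[] storageTypesFor(StorageType[] policyTypes, int replication) {
      final StorageType[] chosen = new StorageType[replication];
      for (int i = 0; i < replication; i++) {
        chosen[i] = policyTypes[Math.min(i, policyTypes.length - 1)];
      }
      return chosen; // {SSD, DISK, ARCHIVE} at replication 3 -> [SSD, DISK, ARCHIVE]
    }

The two targets that are placed come back fastest-first, SSD before DISK, which is the behavior change this JIRA introduces.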
@@ -1233,4 +1278,32 @@ public class TestBlockStoragePolicy {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testStorageType() {
+    final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
+
+    // put the storage types in reverse (slowest-first) order
+    map.put(StorageType.ARCHIVE, 1);
+    map.put(StorageType.DISK, 1);
+    map.put(StorageType.SSD, 1);
+    map.put(StorageType.RAM_DISK, 1);
+
+    {
+      final Iterator<StorageType> i = map.keySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next());
+      Assert.assertEquals(StorageType.SSD, i.next());
+      Assert.assertEquals(StorageType.DISK, i.next());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next());
+    }
+
+    {
+      final Iterator<Map.Entry<StorageType, Integer>> i
+          = map.entrySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.SSD, i.next().getKey());
+      Assert.assertEquals(StorageType.DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+    }
+  }
 }