HDFS-7228. Add an SSD policy into the default BlockStoragePolicySuite. Contributed by Jing Zhao.

Jing Zhao 2014-10-14 10:22:34 -07:00
parent 5faaba0bd0
commit 7dcad84143
7 changed files with 86 additions and 77 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -676,6 +676,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7195. Update user doc of secure mode about Datanodes don't require root
     or jsvc. (cnauroth)
 
+    HDFS-7228. Add an SSD policy into the default BlockStoragePolicySuite.
+    (jing9)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -455,8 +455,8 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   private boolean initLazyPersist(HdfsFileStatus stat) {
-    final BlockStoragePolicy lpPolicy =
-        blockStoragePolicySuite.getPolicy("LAZY_PERSIST");
+    final BlockStoragePolicy lpPolicy = blockStoragePolicySuite
+        .getPolicy(HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
     return lpPolicy != null &&
         stat.getStoragePolicy() == lpPolicy.getId();
   }
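
Note: the memory policy that initLazyPersist() looks up is normally attached to a file by creating it with the LAZY_PERSIST create flag. A minimal client-side sketch of that path, assuming fs.defaultFS points at an HDFS cluster with RAM_DISK storage configured; the /tmp/scratch path and class name are illustrative only, and lazy-persist writes require replication 1:

```java
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LazyPersistWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS is an hdfs:// URI, so the cast below succeeds.
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    // CREATE + LAZY_PERSIST stamps the file with the memory storage policy,
    // which is the id that initLazyPersist() above checks for.
    FSDataOutputStream out = dfs.create(new Path("/tmp/scratch"),
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
        4096, (short) 1, dfs.getDefaultBlockSize(), null);
    out.write(new byte[]{1, 2, 3});
    out.close();
  }
}
```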

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -164,4 +164,18 @@ public class HdfsConstants {
   public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
       = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
 
+  public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+  public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
+  public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+  public static final String HOT_STORAGE_POLICY_NAME = "HOT";
+  public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String COLD_STORAGE_POLICY_NAME = "COLD";
+
+  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
+  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
+  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
+  public static final byte HOT_STORAGE_POLICY_ID = 7;
+  public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte COLD_STORAGE_POLICY_ID = 2;
 }
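
Note: the name constants above are what clients pass to DistributedFileSystem#setStoragePolicy. A minimal usage sketch, assuming fs.defaultFS points at HDFS and SSD volumes are configured on the DataNodes; the /data/* paths and class name are hypothetical:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetSsdPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    // ONE_SSD: one replica is placed on SSD, the remaining replicas on DISK.
    dfs.setStoragePolicy(new Path("/data/ssd-hot"),
        HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // ALL_SSD: every replica targets SSD, with DISK as the creation fallback.
    dfs.setStoragePolicy(new Path("/data/ssd-all"),
        HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
  }
}
```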

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -44,23 +45,39 @@ public class BlockStoragePolicySuite {
   public static BlockStoragePolicySuite createDefaultSuite() {
     final BlockStoragePolicy[] policies =
         new BlockStoragePolicy[1 << ID_BIT_LENGTH];
-    final byte lazyPersistId = 15;
-    policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, "LAZY_PERSIST",
+    final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
+    policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
+        HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
         new StorageType[]{StorageType.DISK},
         new StorageType[]{StorageType.DISK},
         true);    // Cannot be changed on regular files, but inherited.
-    final byte hotId = 12;
-    policies[hotId] = new BlockStoragePolicy(hotId, "HOT",
+    final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+    policies[allssdId] = new BlockStoragePolicy(allssdId,
+        HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
+        new StorageType[]{StorageType.SSD},
+        new StorageType[]{StorageType.DISK},
+        new StorageType[]{StorageType.DISK});
+    final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
+    policies[onessdId] = new BlockStoragePolicy(onessdId,
+        HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
+        new StorageType[]{StorageType.SSD, StorageType.DISK},
+        new StorageType[]{StorageType.SSD, StorageType.DISK},
+        new StorageType[]{StorageType.SSD, StorageType.DISK});
+    final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
+    policies[hotId] = new BlockStoragePolicy(hotId,
+        HdfsConstants.HOT_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
         new StorageType[]{StorageType.ARCHIVE});
-    final byte warmId = 8;
-    policies[warmId] = new BlockStoragePolicy(warmId, "WARM",
+    final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
+    policies[warmId] = new BlockStoragePolicy(warmId,
+        HdfsConstants.WARM_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
-    final byte coldId = 4;
-    policies[coldId] = new BlockStoragePolicy(coldId, "COLD",
+    final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
+    policies[coldId] = new BlockStoragePolicy(coldId,
+        HdfsConstants.COLD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,
         StorageType.EMPTY_ARRAY);
     return new BlockStoragePolicySuite(hotId, policies);
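
Note: to see how the two new policies behave, BlockStoragePolicy#chooseStorageTypes(replication) expands a policy's storage-type list across replicas: the leading entries are used as given and the last entry fills the remainder. A small sketch against the default suite built above (class name is illustrative):

```java
import java.util.List;

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class SsdPolicyExpansionSketch {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();

    BlockStoragePolicy oneSsd =
        suite.getPolicy(HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // For replication 3 this yields [SSD, DISK, DISK]: one replica on SSD,
    // the rest on DISK.
    List<StorageType> oneSsdTypes = oneSsd.chooseStorageTypes((short) 3);
    System.out.println("ONE_SSD -> " + oneSsdTypes);

    BlockStoragePolicy allSsd =
        suite.getPolicy(HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
    // For replication 3 this yields [SSD, SSD, SSD]: DISK is only a fallback.
    List<StorageType> allSsdTypes = allSsd.chooseStorageTypes((short) 3);
    System.out.println("ALL_SSD -> " + allSsdTypes);
  }
}
```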

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -1133,7 +1133,8 @@ public class DFSTestUtil {
     // OP_CLOSE 9
     s.close();
     // OP_SET_STORAGE_POLICY 45
-    filesystem.setStoragePolicy(pathFileCreate, "HOT");
+    filesystem.setStoragePolicy(pathFileCreate,
+        HdfsConstants.HOT_STORAGE_POLICY_NAME);
     // OP_RENAME_OLD 1
     final Path pathFileMoved = new Path("/file_moved");
     filesystem.rename(pathFileCreate, pathFileMoved);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -66,10 +66,12 @@ public class TestBlockStoragePolicy {
   static final long FILE_LEN = 1024;
   static final short REPLICATION = 3;
 
-  static final byte COLD = (byte) 4;
-  static final byte WARM = (byte) 8;
-  static final byte HOT = (byte) 12;
-  static final byte LAZY_PERSIST = (byte) 15;
+  static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID;
+  static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID;
+  static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID;
+  static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
+  static final byte ALLSSD = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+  static final byte LAZY_PERSIST = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
 
   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -79,7 +81,8 @@ public class TestBlockStoragePolicy {
         .numDataNodes(1).build();
     try {
       cluster.waitActive();
-      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+      cluster.getFileSystem().setStoragePolicy(new Path("/"),
+          HdfsConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -98,7 +101,8 @@ public class TestBlockStoragePolicy {
         .numDataNodes(1).build();
     try {
       cluster.waitActive();
-      cluster.getFileSystem().setStoragePolicy(new Path("/"), "COLD");
+      cluster.getFileSystem().setStoragePolicy(new Path("/"),
+          HdfsConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -108,17 +112,25 @@ public class TestBlockStoragePolicy {
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+        "BlockStoragePolicy{COLD:" + COLD + ", storageTypes=[ARCHIVE], " +
         "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
-        "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
+        "BlockStoragePolicy{WARM:" + WARM + ", storageTypes=[DISK, ARCHIVE], " +
+        "creationFallbacks=[DISK, ARCHIVE], " +
+        "replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+        "BlockStoragePolicy{HOT:" + HOT + ", storageTypes=[DISK], " +
         "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
     expectedPolicyStrings.put(LAZY_PERSIST,
-        "BlockStoragePolicy{LAZY_PERSIST:15, storageTypes=[RAM_DISK, DISK], " +
+        "BlockStoragePolicy{LAZY_PERSIST:" + LAZY_PERSIST +
+        ", storageTypes=[RAM_DISK, DISK], " +
         "creationFallbacks=[DISK], replicationFallbacks=[DISK]}");
+    expectedPolicyStrings.put(ONESSD, "BlockStoragePolicy{ONE_SSD:" + ONESSD +
+        ", storageTypes=[SSD, DISK], creationFallbacks=[SSD, DISK], " +
+        "replicationFallbacks=[SSD, DISK]}");
+    expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
+        ", storageTypes=[SSD], creationFallbacks=[DISK], " +
+        "replicationFallbacks=[DISK]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -845,15 +857,15 @@ public class TestBlockStoragePolicy {
       final Path invalidPath = new Path("/invalidPath");
       try {
-        fs.setStoragePolicy(invalidPath, "WARM");
+        fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
         Assert.fail("Should throw a FileNotFoundException");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
       }
 
-      fs.setStoragePolicy(fooFile, "COLD");
-      fs.setStoragePolicy(barDir, "WARM");
-      fs.setStoragePolicy(barFile2, "HOT");
+      fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
 
       dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -901,7 +913,7 @@ public class TestBlockStoragePolicy {
       DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
       DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
 
-      fs.setStoragePolicy(fooDir, "WARM");
+      fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
 
       HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -913,7 +925,7 @@ public class TestBlockStoragePolicy {
       // take snapshot
       SnapshotTestHelper.createSnapshot(fs, dir, "s1");
       // change the storage policy of fooFile1
-      fs.setStoragePolicy(fooFile1, "COLD");
+      fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
 
       fooList = fs.getClient().listPaths(fooDir.toString(),
           HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -936,7 +948,7 @@ public class TestBlockStoragePolicy {
           HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
 
       // change the storage policy of foo dir
-      fs.setStoragePolicy(fooDir, "HOT");
+      fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // /dir/foo is now hot
       dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -1053,7 +1065,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeHotFileRep() throws Exception {
-    testChangeFileRep("HOT", HOT,
+    testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
         new StorageType[]{StorageType.DISK, StorageType.DISK,
             StorageType.DISK},
         new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
@@ -1067,7 +1079,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeWarmRep() throws Exception {
-    testChangeFileRep("WARM", WARM,
+    testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
             StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
@@ -1080,7 +1092,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeColdRep() throws Exception {
-    testChangeFileRep("COLD", COLD,
+    testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
            StorageType.ARCHIVE},
        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
@@ -1144,7 +1156,7 @@ public class TestBlockStoragePolicy {
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(4, policies.length);
+      Assert.assertEquals(6, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java

@@ -32,7 +32,6 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -67,8 +67,6 @@ import org.junit.Test;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC;
-
 /**
  * Test the data migration tool (for Archival Storage)
  */
@@ -100,9 +98,9 @@ public class TestStorageMover {
     DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
 
     DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
-    HOT = DEFAULT_POLICIES.getPolicy("HOT");
-    WARM = DEFAULT_POLICIES.getPolicy("WARM");
-    COLD = DEFAULT_POLICIES.getPolicy("COLD");
+    HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
+    WARM = DEFAULT_POLICIES.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
+    COLD = DEFAULT_POLICIES.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_NAME);
     TestBalancer.initTestSetup();
     Dispatcher.setDelayAfterErrors(1000L);
   }
@@ -201,14 +199,6 @@ public class TestStorageMover {
       this.policies = DEFAULT_POLICIES;
     }
 
-    MigrationTest(ClusterScheme cScheme, NamespaceScheme nsScheme,
-        BlockStoragePolicySuite policies) {
-      this.clusterScheme = cScheme;
-      this.nsScheme = nsScheme;
-      this.conf = clusterScheme.conf;
-      this.policies = policies;
-    }
-
     /**
      * Set up the cluster and start NameNode and DataNodes according to the
      * corresponding scheme.
@@ -273,9 +263,6 @@ public class TestStorageMover {
       }
       if (verifyAll) {
         verifyNamespace();
-      } else {
-        // TODO verify according to the given path list
-      }
     }
   }
@@ -413,11 +400,6 @@ public class TestStorageMover {
       return genStorageTypes(numDataNodes, 0, 0, 0);
     }
 
-    private static StorageType[][] genStorageTypes(int numDataNodes,
-        int numAllDisk, int numAllArchive) {
-      return genStorageTypes(numDataNodes, numAllDisk, numAllArchive, 0);
-    }
-
     private static StorageType[][] genStorageTypes(int numDataNodes,
         int numAllDisk, int numAllArchive, int numRamDisk) {
       Preconditions.checkArgument(
@@ -441,26 +423,6 @@ public class TestStorageMover {
       return types;
     }
 
-    private static long[][] genCapacities(int nDatanodes, int numAllDisk,
-        int numAllArchive, int numRamDisk, long diskCapacity,
-        long archiveCapacity, long ramDiskCapacity) {
-      final long[][] capacities = new long[nDatanodes][];
-      int i = 0;
-      for (; i < numRamDisk; i++) {
-        capacities[i] = new long[]{ramDiskCapacity, diskCapacity};
-      }
-      for (; i < numRamDisk + numAllDisk; i++) {
-        capacities[i] = new long[]{diskCapacity, diskCapacity};
-      }
-      for (; i < numRamDisk + numAllDisk + numAllArchive; i++) {
-        capacities[i] = new long[]{archiveCapacity, archiveCapacity};
-      }
-      for(; i < capacities.length; i++) {
-        capacities[i] = new long[]{diskCapacity, archiveCapacity};
-      }
-      return capacities;
-    }
-
     private static class PathPolicyMap {
       final Map<Path, BlockStoragePolicy> map = Maps.newHashMap();
       final Path hot = new Path("/hot");
@@ -666,8 +628,8 @@ public class TestStorageMover {
 
     private void setVolumeFull(DataNode dn, StorageType type) {
       List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
-      for (int j = 0; j < volumes.size(); ++j) {
-        FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
+      for (FsVolumeSpi v : volumes) {
+        FsVolumeImpl volume = (FsVolumeImpl) v;
         if (volume.getStorageType() == type) {
           LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
               + volume.getStorageID());