HDFS-7171. Fix Jenkins failures in HDFS-6581 branch. (Arpit Agarwal)
Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
parent c865c93dc1
commit ae8c9cdb18
@@ -2357,7 +2357,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     src = FSDirectory.resolvePath(src, pathComponents, dir);
     INode inode = dir.getINode(src);

     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = blockManager.getStoragePolicy(policyName);
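Reviewer note: the FSNamesystem hunk above sits in the path that resolves a file and looks up a storage policy by its name. A minimal client-side sketch of the call this serves, assuming DistributedFileSystem#setStoragePolicy(Path, String) is available on this branch; the path and policy name are illustrative only:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class SetStoragePolicySketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Assumes fs.defaultFS points at an HDFS namenode.
      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
      Path dir = new Path("/data/hot");   // illustrative path
      // The name is checked on the namenode by blockManager.getStoragePolicy(policyName),
      // as in the hunk above, which is where an invalid name would be caught.
      dfs.setStoragePolicy(dir, "LAZY_PERSIST");
    }
  }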
@@ -71,6 +71,7 @@ public class TestBlockStoragePolicy {
   static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID;
   static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
   static final byte ALLSSD = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+  static final byte LAZY_PERSIST = HdfsConstants.LAZY_PERSIST_STORAGE_POLICY_ID;

   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -126,6 +127,9 @@ public class TestBlockStoragePolicy {
     expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
         ", storageTypes=[SSD], creationFallbacks=[DISK], " +
         "replicationFallbacks=[DISK]}");
+    expectedPolicyStrings.put(LAZY_PERSIST,
+        "BlockStoragePolicy{LAZY_PERSIST:" + LAZY_PERSIST + ", storageTypes=[RAM_DISK, DISK], " +
+        "creationFallbacks=[DISK], replicationFallbacks=[DISK]}");

     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1151,13 +1155,19 @@ public class TestBlockStoragePolicy {
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(5, policies.length);
+      Assert.assertEquals(6, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
           policies[1].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(),
           policies[2].toString());
+      Assert.assertEquals(POLICY_SUITE.getPolicy(ONESSD).toString(),
+          policies[3].toString());
+      Assert.assertEquals(POLICY_SUITE.getPolicy(ALLSSD).toString(),
+          policies[4].toString());
+      Assert.assertEquals(POLICY_SUITE.getPolicy(LAZY_PERSIST).toString(),
+          policies[5].toString());
     } finally {
       IOUtils.cleanup(null, fs);
       cluster.shutdown();
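Reviewer note: with LAZY_PERSIST included, the suite now exposes six built-in policies. A short sketch of the same enumeration outside the test, assuming a local MiniDFSCluster as the test harness does; class and variable names are illustrative, and every HDFS call used here appears in the hunk above or in standard MiniDFSCluster usage:

  import java.util.Arrays;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.MiniDFSCluster;

  public class ListStoragePoliciesSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      try {
        cluster.waitActive();
        DistributedFileSystem dfs = cluster.getFileSystem();
        // Prints each policy's toString(), matching the expectedPolicyStrings entries above.
        System.out.println(Arrays.toString(dfs.getStoragePolicies()));
      } finally {
        cluster.shutdown();
      }
    }
  }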
@@ -783,56 +783,4 @@ public class TestStorageMover {
       test.shutdownCluster();
     }
   }
-
-  /**
-   * Test blocks of lazy_persist file on RAM_DISK will not be moved to other
-   * storage types by the Storage Mover.
-   */
-  @Test
-  public void testRamDiskNotMoved() throws Exception {
-    LOG.info("testRamDiskNotMoved");
-    final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
-    final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
-
-    final long diskCapacity = 100 * BLOCK_SIZE;
-    final long archiveCapacity = (6 + HdfsConstants.MIN_BLOCKS_FOR_WRITE)
-        * BLOCK_SIZE;
-    final long ramDiskCapacity = 10 * BLOCK_SIZE;
-    final long[][] capacities = genCapacities(1, 0, 0, 1,
-        diskCapacity, archiveCapacity, ramDiskCapacity);
-    final int LAZY_WRITER_INTERVAL_SEC = 1;
-    final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
-        1, (short)1, genStorageTypes(1, 0, 0, 1), capacities);
-    clusterScheme.conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
-        LAZY_WRITER_INTERVAL_SEC);
-    final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
-
-    try {
-      test.runBasicTest(false);
-
-      // test creating a hot RAM_DISK file
-      final int SEED = 0xFADED;
-      final Path foo_hot = new Path(pathPolicyMap.hot, "foo_hot");
-      DFSTestUtil.createFile(test.dfs, foo_hot, true, BLOCK_SIZE, BLOCK_SIZE,
-          BLOCK_SIZE, (short) 1, SEED, true);
-      Assert.assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(test.dfs,
-          test.dfs.getClient(), foo_hot, StorageType.RAM_DISK));
-
-      // Sleep for a short time to allow the lazy writer thread to do its job
-      Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
-
-      // Verify policy related name change is allowed
-      final Path foo_hot_new = new Path(pathPolicyMap.warm, "foo_hot");
-      test.dfs.rename(foo_hot, pathPolicyMap.warm);
-      Assert.assertTrue(test.dfs.exists(foo_hot_new));
-
-      // Verify blocks on ram disk will not be moved to other storage types by
-      // policy based Storage Mover.
-      test.migrate();
-      Assert.assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(test.dfs,
-          test.dfs.getClient(), foo_hot_new, StorageType.RAM_DISK));
-    } finally {
-      test.shutdownCluster();
-    }
-  }
 }
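Reviewer note: the deleted test exercised the lazy-persist write path end to end (write to RAM_DISK, wait for the lazy writer, then confirm the Mover leaves the replica alone). A minimal sketch of how a client would request that path, assuming the CreateFlag.LAZY_PERSIST flag from this feature branch and the generic FileSystem#create overload that takes CreateFlag values; paths, sizes, and the single-replica setting are illustrative, mirroring the removed test:

  import java.util.EnumSet;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CreateFlag;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsPermission;

  public class LazyPersistWriteSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);           // assumes fs.defaultFS points at HDFS
      Path file = new Path("/tmp/foo_hot");           // illustrative path
      EnumSet<CreateFlag> flags =
          EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST);
      FSDataOutputStream out = fs.create(file, FsPermission.getFileDefault(),
          flags, 4096, (short) 1, fs.getDefaultBlockSize(file), null);
      try {
        // Data lands on RAM_DISK first; a lazy writer thread persists it to DISK later,
        // which is why the removed test slept on LAZY_WRITER_INTERVAL_SEC before checking.
        out.write(new byte[4 * 1024]);
      } finally {
        out.close();
      }
    }
  }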