HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart. (surendra singh lilhore via Xiaoyu Yao)

(cherry picked from commit 0100b15501)
Xiaoyu Yao 2015-05-05 13:41:14 -07:00
parent eb0c6d2ee7
commit e68e8b3b5c
3 changed files with 49 additions and 1 deletion

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -299,6 +299,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8290. WebHDFS calls before namesystem initialization can cause
     NullPointerException. (cnauroth)
+
+    HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart.
+    (surendra singh lilhore via Xiaoyu Yao)
 
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -740,7 +740,7 @@ public class FSEditLog implements LogsPurgeable {
           .setClientMachine(
               newNode.getFileUnderConstructionFeature().getClientMachine())
           .setOverwrite(overwrite)
-          .setStoragePolicyId(newNode.getStoragePolicyID());
+          .setStoragePolicyId(newNode.getLocalStoragePolicyID());
       AclFeature f = newNode.getAclFeature();
       if (f != null) {
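
The one-line change above is the core of the fix: the add record written to the edit log now carries the file's locally set storage policy rather than the resolved one. getStoragePolicyID() resolves the effective policy by falling back to the parent directory, so persisting its result stamps the directory's policy onto the file at creation time; once a NameNode restart replays the edit log, the file stops tracking later changes to the directory's policy. getLocalStoragePolicyID() returns only what was explicitly set on the inode (an "unspecified" sentinel otherwise), which keeps inheritance dynamic across restarts. The sketch below is a minimal, self-contained illustration of that difference, not HDFS code: the Inode class and the policy ID constants are made up for the example, and only the getStoragePolicyID()/getLocalStoragePolicyID() distinction mirrors the patch.

// Self-contained sketch, NOT HDFS code: hypothetical Inode class and illustrative
// policy IDs; only the two getter names mirror the patch above.
public class StoragePolicyInheritanceSketch {

  static final byte UNSPECIFIED = 0; // stand-in for "no policy set on this inode"
  static final byte COLD = 2;        // illustrative IDs
  static final byte HOT = 7;

  static class Inode {
    final Inode parent;
    byte localPolicy = UNSPECIFIED;

    Inode(Inode parent) { this.parent = parent; }

    // Effective policy: falls back to the parent when nothing is set locally.
    // Persisting this value freezes the inherited policy onto the child.
    byte getStoragePolicyID() {
      return localPolicy != UNSPECIFIED ? localPolicy
          : (parent == null ? UNSPECIFIED : parent.getStoragePolicyID());
    }

    // Local policy only: what the fixed edit-log record stores.
    byte getLocalStoragePolicyID() {
      return localPolicy;
    }
  }

  public static void main(String[] args) {
    Inode dir = new Inode(null);
    Inode file = new Inode(dir);

    dir.localPolicy = COLD;                           // setStoragePolicy(dir, "COLD")
    byte loggedByOldCode = file.getStoragePolicyID(); // resolved value, i.e. COLD

    dir.localPolicy = HOT;                            // setStoragePolicy(dir, "HOT") later on

    System.out.println("live lookup:            " + file.getStoragePolicyID());      // 7 (HOT)
    System.out.println("old edit log recorded:  " + loggedByOldCode);                // 2 (stale COLD after replay)
    System.out.println("fixed edit log records: " + file.getLocalStoragePolicyID()); // 0 (unspecified, keeps inheriting)
  }
}

With the old behavior, a file created while its directory was COLD kept reporting COLD after an edit-log replay even though the directory had since been switched to HOT; that is exactly the scenario the new test below drives against a MiniDFSCluster.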

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -26,6 +26,7 @@ import java.util.*;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -1177,4 +1178,48 @@ public class TestBlockStoragePolicy {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
+    // Regression test for HDFS-8219.
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(REPLICATION)
+        .storageTypes(
+            new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
+        .build();
+    cluster.waitActive();
+    final DistributedFileSystem fs = cluster.getFileSystem();
+    try {
+      final String file = "/testScheduleWithinSameNode/file";
+      Path dir = new Path("/testScheduleWithinSameNode");
+      // 1. Create the parent directory.
+      fs.mkdirs(dir);
+      // 2. Set the directory policy to COLD.
+      fs.setStoragePolicy(dir, "COLD");
+      // 3. Create a file under the directory.
+      final FSDataOutputStream out = fs.create(new Path(file));
+      out.writeChars("testScheduleWithinSameNode");
+      out.close();
+      // 4. Change the directory policy to HOT.
+      fs.setStoragePolicy(dir, "HOT");
+      HdfsFileStatus status = fs.getClient().getFileInfo(file);
+      // 5. Get the file policy; it should be the parent's policy.
+      Assert.assertTrue("File storage policy should be HOT",
+          status.getStoragePolicy() == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
+      // 6. Restart the NameNode so the edit log is reloaded.
+      cluster.restartNameNode(true);
+      // 7. Get the file policy again; it should still be the parent's policy.
+      status = fs.getClient().getFileInfo(file);
+      Assert.assertTrue("File storage policy should be HOT",
+          status.getStoragePolicy() == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }