diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 713c23cc58d..85289997353 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -279,6 +279,20 @@ public interface ClientProtocol {
void setStoragePolicy(String src, String policyName)
throws IOException;
+ /**
+ * Get the storage policy for a file/directory.
+ * @param path
+ * Path of an existing file/directory.
+ * @throws AccessControlException
+ * If access is denied
+ * @throws org.apache.hadoop.fs.UnresolvedLinkException
+ *           if src contains a symlink
+ * @throws java.io.FileNotFoundException
+ *           If file/dir src is not found
+ */
+ @Idempotent
+ BlockStoragePolicy getStoragePolicy(String path) throws IOException;
+
/**
* Set permissions for an existing file/directory.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index b44c556bbaa..7d3256887db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -112,6 +112,14 @@ message SetStoragePolicyRequestProto {
message SetStoragePolicyResponseProto { // void response
}
+message GetStoragePolicyRequestProto {
+ required string path = 1;
+}
+
+message GetStoragePolicyResponseProto {
+ required BlockStoragePolicyProto storagePolicy = 1;
+}
+
message GetStoragePoliciesRequestProto { // void request
}
@@ -725,6 +733,8 @@ service ClientNamenodeProtocol {
returns(SetReplicationResponseProto);
rpc setStoragePolicy(SetStoragePolicyRequestProto)
returns(SetStoragePolicyResponseProto);
+ rpc getStoragePolicy(GetStoragePolicyRequestProto)
+ returns(GetStoragePolicyResponseProto);
rpc getStoragePolicies(GetStoragePoliciesRequestProto)
returns(GetStoragePoliciesResponseProto);
rpc setPermission(SetPermissionRequestProto)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f093eeaff33..40f91f9f969 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -770,6 +770,9 @@ Release 2.8.0 - UNRELEASED
HDFS-6860. BlockStateChange logs are too noisy. (Chang Li and xyao via xyao)
+ HDFS-8815. DFS getStoragePolicy implementation using single RPC call
+ (Surendra Singh Lilhore via vinayakumarb)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 44713a455fe..3f4621eb114 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1574,21 +1574,22 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
/**
+ * @param path file/directory name
* @return Get the storage policy for specified path
*/
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
- HdfsFileStatus status = getFileInfo(path);
- if (status == null) {
- throw new FileNotFoundException("File does not exist: " + path);
+ checkOpen();
+ TraceScope scope = getPathTraceScope("getStoragePolicy", path);
+ try {
+ return namenode.getStoragePolicy(path);
+ } catch (RemoteException e) {
+ throw e.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ SafeModeException.class,
+ UnresolvedPathException.class);
+ } finally {
+ scope.close();
}
- byte storagePolicyId = status.getStoragePolicy();
- BlockStoragePolicy[] policies = getStoragePolicies();
- for (BlockStoragePolicy policy : policies) {
- if (policy.getId() == storagePolicyId) {
- return policy;
- }
- }
- return null;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 480b3d9bf8b..8e81fdc14a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -128,6 +128,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -198,6 +200,7 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -1457,6 +1460,20 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
return VOID_SET_STORAGE_POLICY_RESPONSE;
}
+ @Override
+ public GetStoragePolicyResponseProto getStoragePolicy(
+ RpcController controller, GetStoragePolicyRequestProto request)
+ throws ServiceException {
+ try {
+ BlockStoragePolicyProto policy = PBHelper.convert(server
+ .getStoragePolicy(request.getPath()));
+ return GetStoragePolicyResponseProto.newBuilder()
+ .setStoragePolicy(policy).build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
RpcController controller, GetStoragePoliciesRequestProto request)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 566d54f01da..d6afa6ed6a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -124,6 +124,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
@@ -1484,6 +1485,18 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
}
+ @Override
+ public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
+ GetStoragePolicyRequestProto request = GetStoragePolicyRequestProto
+ .newBuilder().setPath(path).build();
+ try {
+ return PBHelper.convert(rpcProxy.getStoragePolicy(null, request)
+ .getStoragePolicy());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
+
@Override
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index b322b698e6f..d624f84ae28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -200,6 +200,29 @@ public class FSDirAttrOp {
return bm.getStoragePolicies();
}
+ static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
+ String path) throws IOException {
+ FSPermissionChecker pc = fsd.getPermissionChecker();
+ byte[][] pathComponents = FSDirectory
+ .getPathComponentsForReservedPath(path);
+ fsd.readLock();
+ try {
+ path = fsd.resolvePath(pc, path, pathComponents);
+ final INodesInPath iip = fsd.getINodesInPath(path, false);
+ if (fsd.isPermissionEnabled()) {
+ fsd.checkPathAccess(pc, iip, FsAction.READ);
+ }
+ INode inode = iip.getLastINode();
+ if (inode == null) {
+ throw new FileNotFoundException("File/Directory does not exist: "
+ + iip.getPath());
+ }
+ return bm.getStoragePolicy(inode.getStoragePolicyID());
+ } finally {
+ fsd.readUnlock();
+ }
+ }
+
static long getPreferredBlockSize(FSDirectory fsd, String src)
throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a259070ff32..e3717abbe6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1956,6 +1956,25 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
logAuditEvent(true, "setStoragePolicy", src, null, auditStat);
}
+ /**
+ * Get the storage policy for a file or a directory.
+ *
+ * @param src
+ * file/directory path
+ * @return storage policy object
+ */
+ BlockStoragePolicy getStoragePolicy(String src) throws IOException {
+ checkOperation(OperationCategory.READ);
+ waitForLoadingFSImage();
+ readLock();
+ try {
+ checkOperation(OperationCategory.READ);
+ return FSDirAttrOp.getStoragePolicy(dir, blockManager, src);
+ } finally {
+ readUnlock();
+ }
+ }
+
/**
* @return All the existing block storage policies
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 52aaabd13e6..6b7e8cfa6c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -690,6 +690,12 @@ class NameNodeRpcServer implements NamenodeProtocols {
namesystem.setStoragePolicy(src, policyName);
}
+ @Override
+ public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
+ checkNNStartup();
+ return namesystem.getStoragePolicy(path);
+ }
+
@Override
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
checkNNStartup();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 631d9f787c7..689a1d187e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -979,6 +979,29 @@ public class TestBlockStoragePolicy {
}
}
+ @Test
+ public void testGetStoragePolicy() throws Exception {
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(REPLICATION).build();
+ cluster.waitActive();
+ final DistributedFileSystem fs = cluster.getFileSystem();
+ try {
+ final Path dir = new Path("/testGetStoragePolicy");
+ final Path fooFile = new Path(dir, "foo");
+ DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
+ DFSClient client = new DFSClient(cluster.getNameNode(0)
+ .getNameNodeAddress(), conf);
+ client.setStoragePolicy("/testGetStoragePolicy/foo",
+ HdfsConstants.COLD_STORAGE_POLICY_NAME);
+ String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
+ .getName();
+ Assert.assertEquals("File storage policy should be COLD",
+ HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName);
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
@Test
public void testSetStoragePolicyWithSnapshot() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)