From 87f07e67b116ac14ebadb5ad0cc13831c085cbd0 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Wed, 24 Sep 2014 19:11:16 -0700
Subject: [PATCH] HDFS-7140. Add a tool to list all the existing block storage
 policies. Contributed by Jing Zhao.

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
	hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +
 .../hadoop-hdfs/src/main/bin/hdfs                  |  3 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd              |  7 +-
 .../org/apache/hadoop/hdfs/DFSClient.java          |  4 +-
 .../hadoop/hdfs/DistributedFileSystem.java         |  4 +-
 .../hdfs/protocol/BlockStoragePolicy.java          |  2 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java       |  4 +-
 ...amenodeProtocolServerSideTranslatorPB.java      | 14 ++--
 .../ClientNamenodeProtocolTranslatorPB.java        | 16 ++---
 .../server/blockmanagement/BlockManager.java       |  2 +-
 .../hadoop/hdfs/server/mover/Mover.java            |  6 +-
 .../hdfs/server/namenode/FSNamesystem.java         |  4 +-
 .../server/namenode/NameNodeRpcServer.java         |  4 +-
 .../apache/hadoop/hdfs/tools/DFSAdmin.java         |  2 +-
 .../hadoop/hdfs/tools/GetStoragePolicies.java      | 65 +++++++++++++++++++
 .../main/proto/ClientNamenodeProtocol.proto        |  8 +--
 .../hadoop/hdfs/TestBlockStoragePolicy.java        | 11 ++--
 17 files changed, 118 insertions(+), 41 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fec26f42090..f51551b1408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -703,6 +703,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7081. Add new DistributedFileSystem API for getting all the existing
     storage policies. (jing9)

+    HDFS-7140. Add a tool to list all the existing block storage policies.
+    (jing9)
+
 Release 2.5.1 - 2014-09-05

   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 046163ca8a1..b5af54ec63b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -64,6 +64,7 @@ function print_usage(){
   echo "  nfs3                 run an NFS version 3 gateway"
   echo "  cacheadmin           configure the HDFS cache"
   echo "  crypto               configure HDFS encryption zones"
+  echo "  storagepolicies      get all the existing block storage policies"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -164,6 +165,8 @@ elif [ "$COMMAND" = "balancer" ] ; then
 elif [ "$COMMAND" = "mover" ] ; then
   CLASS=org.apache.hadoop.hdfs.server.mover.Mover
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
 elif [ "$COMMAND" = "jmxget" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.JMXGet
 elif [ "$COMMAND" = "oiv" ] ; then
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 9fb84261e1c..69424ed745f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )

-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -155,6 +155,10 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
   goto :eof

+:storagepolicies
+  set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -204,6 +208,7 @@ goto :eof
   @echo Use -help to see options
   @echo   cacheadmin           configure the HDFS cache
   @echo   mover                run a utility to move block replicas across storage types
+  @echo   storagepolicies      get all the existing block storage policies
   @echo.
   @echo Most commands print help when invoked w/o parameters.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 03f5670a2b2..e9fe06f507d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1783,8 +1783,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * @return All the existing storage policies
    */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namenode.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namenode.getStoragePolicies();
   }

   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3069289a902..e796a5c04b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -506,9 +506,9 @@ public class DistributedFileSystem extends FileSystem {
   }

   /** Get all the existing storage policies */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     statistics.incrementReadOps(1);
-    return dfs.getStoragePolicySuite();
+    return dfs.getStoragePolicies();
   }

   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
index 35bef516307..8ca83a0bd88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -209,7 +209,7 @@ public class BlockStoragePolicy {
     return getClass().getSimpleName() + "{" + name + ":" + id
         + ", storageTypes=" + Arrays.asList(storageTypes)
         + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
-        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks);
+        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
   }

   public byte getId() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 7e16febdd35..df67db65017 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -43,13 +43,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventsList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -264,7 +262,7 @@ public interface ClientProtocol {
    * @return All the in-use block storage policies currently.
    */
   @Idempotent
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException;
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException;

   /**
    * Set the storage policy for a file/directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 26a97629c99..adad3b81b87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -120,8 +120,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1433,13 +1433,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }

   @Override
-  public GetStoragePolicySuiteResponseProto getStoragePolicySuite(
-      RpcController controller, GetStoragePolicySuiteRequestProto request)
+  public GetStoragePoliciesResponseProto getStoragePolicies(
+      RpcController controller, GetStoragePoliciesRequestProto request)
       throws ServiceException {
     try {
-      BlockStoragePolicy[] policies = server.getStoragePolicySuite();
-      GetStoragePolicySuiteResponseProto.Builder builder =
-          GetStoragePolicySuiteResponseProto.newBuilder();
+      BlockStoragePolicy[] policies = server.getStoragePolicies();
+      GetStoragePoliciesResponseProto.Builder builder =
+          GetStoragePoliciesResponseProto.newBuilder();
       if (policies == null) {
         return builder.build();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 22238b43bab..90b52e0963e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -118,8 +118,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
@@ -226,9 +226,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
   VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
       GetDataEncryptionKeyRequestProto.newBuilder().build();

-  private final static GetStoragePolicySuiteRequestProto
-  VOID_GET_STORAGE_POLICY_SUITE_REQUEST =
-      GetStoragePolicySuiteRequestProto.newBuilder().build();
+  private final static GetStoragePoliciesRequestProto
+  VOID_GET_STORAGE_POLICIES_REQUEST =
+      GetStoragePoliciesRequestProto.newBuilder().build();

   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
@@ -1456,10 +1456,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }

   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     try {
-      GetStoragePolicySuiteResponseProto response = rpcProxy
-          .getStoragePolicySuite(null, VOID_GET_STORAGE_POLICY_SUITE_REQUEST);
+      GetStoragePoliciesResponseProto response = rpcProxy
+          .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
       return PBHelper.convertStoragePolicies(response.getPoliciesList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 80a1883970a..3404efba1b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -402,7 +402,7 @@ public class BlockManager {
     return storagePolicySuite.getPolicy(policyName);
   }

-  public BlockStoragePolicy[] getStoragePolicySuite() {
+  public BlockStoragePolicy[] getStoragePolicies() {
     return storagePolicySuite.getAllPolicies();
   }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index c2221818fc8..4db0df69d3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -142,7 +142,7 @@ public class Mover {
   private void initStoragePolicies() throws IOException {
     BlockStoragePolicy[] policies = dispatcher.getDistributedFileSystem()
-        .getStoragePolicySuite();
+        .getStoragePolicies();
     for (BlockStoragePolicy policy : policies) {
       this.blockStoragePolicies[policy.getId()] = policy;
     }
   }
@@ -387,8 +387,8 @@ public class Mover {
     boolean scheduleMoveReplica(DBlock db, MLocation ml,
         List<StorageType> targetTypes) {
       final Source source = storages.getSource(ml);
-      return source == null ? false : scheduleMoveReplica(db,
-          storages.getSource(ml), targetTypes);
+      return source == null ? false : scheduleMoveReplica(db, source,
+          targetTypes);
     }

     boolean scheduleMoveReplica(DBlock db, Source source,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 955ff083d9e..41484746728 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2307,13 +2307,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /**
    * @return All the existing block storage policies
    */
-  BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return blockManager.getStoragePolicySuite();
+      return blockManager.getStoragePolicies();
     } finally {
       readUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index bb2fb8351cf..b6be8778744 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -589,8 +589,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namesystem.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namesystem.getStoragePolicies();
   }

   @Override // ClientProtocol
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 17e6cc949a2..298564e41a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -620,7 +620,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("The storage policy of " + argv[1] + " is unspecified");
       return 0;
     }
-    BlockStoragePolicy[] policies = dfs.getStoragePolicySuite();
+    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
     for (BlockStoragePolicy p : policies) {
       if (p.getId() == storagePolicyId) {
         System.out.println("The storage policy of " + argv[1] + ":\n" + p);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java
new file mode 100644
index 00000000000..d2793ebb739
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+
+/**
+ * A tool listing all the existing block storage policies. No argument is
+ * required when using this tool.
+ */
+public class GetStoragePolicies extends Configured implements Tool {
+
+  @Override
+  public int run(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(getConf());
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("GetStoragePolicies can only be used against HDFS. " +
+          "Please check the default FileSystem setting in your configuration.");
+      return 1;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    try {
+      BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+      System.out.println("Block Storage Policies:");
+      for (BlockStoragePolicy policy : policies) {
+        if (policy != null) {
+          System.out.println("\t" + policy);
+        }
+      }
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("GetStoragePolicies: " + content[0]);
+      return 1;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int rc = ToolRunner.run(new GetStoragePolicies(), args);
+    System.exit(rc);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index ce7bf1cde8b..e09f142a688 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -108,10 +108,10 @@ message SetStoragePolicyRequestProto {
 message SetStoragePolicyResponseProto { // void response
 }

-message GetStoragePolicySuiteRequestProto { // void request
+message GetStoragePoliciesRequestProto { // void request
 }

-message GetStoragePolicySuiteResponseProto {
+message GetStoragePoliciesResponseProto {
   repeated BlockStoragePolicyProto policies = 1;
 }

@@ -706,8 +706,8 @@ service ClientNamenodeProtocol {
       returns(SetReplicationResponseProto);
   rpc setStoragePolicy(SetStoragePolicyRequestProto)
       returns(SetStoragePolicyResponseProto);
-  rpc getStoragePolicySuite(GetStoragePolicySuiteRequestProto)
-      returns(GetStoragePolicySuiteResponseProto);
+  rpc getStoragePolicies(GetStoragePoliciesRequestProto)
+      returns(GetStoragePoliciesResponseProto);
   rpc setPermission(SetPermissionRequestProto)
       returns(SetPermissionResponseProto);
   rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 38ffceef969..39d143946f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -73,11 +73,14 @@ public class TestBlockStoragePolicy {
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]");
+        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+        "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]");
+        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
+        "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]");
+        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+        "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");

     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1102,7 +1105,7 @@ public class TestBlockStoragePolicy {
     cluster.waitActive();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      BlockStoragePolicy[] policies = fs.getStoragePolicySuite();
+      BlockStoragePolicy[] policies = fs.getStoragePolicies();
       Assert.assertEquals(3, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
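
Usage note (illustrative, not part of the patch): once applied, the tool is run
with no arguments as "hdfs storagepolicies" (or "hdfs.cmd storagepolicies" on
Windows) and prints "Block Storage Policies:" followed by one line per policy
in the BlockStoragePolicy#toString() format fixed above. The same listing is
available programmatically through the renamed
DistributedFileSystem#getStoragePolicies() API. A minimal client sketch against
that API follows; the ListStoragePolicies class name and the assumption that
fs.defaultFS in the classpath configuration points at an HDFS cluster are
illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

  public class ListStoragePolicies {
    public static void main(String[] args) throws Exception {
      // Loads core-site.xml/hdfs-site.xml from the classpath; fs.defaultFS
      // must point at an HDFS cluster for the cast below to succeed.
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      // getStoragePolicies() is defined on DistributedFileSystem only,
      // mirroring the instanceof check in GetStoragePolicies#run above.
      BlockStoragePolicy[] policies =
          ((DistributedFileSystem) fs).getStoragePolicies();
      for (BlockStoragePolicy policy : policies) {
        // Prints e.g. BlockStoragePolicy{HOT:12, storageTypes=[DISK], ...}
        System.out.println(policy);
      }
    }
  }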