HDFS-7140. Add a tool to list all the existing block storage policies. Contributed by Jing Zhao.

Jing Zhao committed 2014-09-24 19:11:16 -07:00
parent 72b0881ca6
commit 428a76663a
17 changed files with 119 additions and 41 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -345,6 +345,9 @@ Trunk (Unreleased)
     HDFS-7081. Add new DistributedFileSystem API for getting all the existing
     storage policies. (jing9)
 
+    HDFS-7140. Add a tool to list all the existing block storage policies.
+    (jing9)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -48,6 +48,7 @@ function hadoop_usage
   echo "  secondarynamenode    run the DFS secondary namenode"
   echo "  snapshotDiff         diff two snapshots of a directory or diff the"
   echo "                       current directory contents with a snapshot"
+  echo "  storagepolicies      get all the existing block storage policies"
   echo "  zkfc                 run the ZK Failover Controller daemon"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
@@ -216,6 +217,9 @@ case ${COMMAND} in
   snapshotDiff)
     CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
   ;;
+  storagepolicies)
+    CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  ;;
   zkfc)
     daemon="true"
     CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
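A quick end-to-end check of the new subcommand wiring, assuming a running cluster with the default policy suite; the expected output follows the "Block Storage Policies:" header printed by the tool and the BlockStoragePolicy toString() format fixed later in this commit:

  $ hdfs storagepolicies
  Block Storage Policies:
  	BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]}
  	BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}
  	BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}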

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd

@@ -47,7 +47,7 @@ if "%1" == "--config" (
       goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -155,6 +155,10 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
   goto :eof
 
+:storagepolicies
+  set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -204,6 +208,7 @@ goto :eof
   @echo   Use -help to see options
   @echo   cacheadmin           configure the HDFS cache
   @echo   mover                run a utility to move block replicas across storage types
+  @echo   storagepolicies      get all the existing block storage policies
   @echo.
 
   @echo Most commands print help when invoked w/o parameters.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -1783,8 +1783,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * @return All the existing storage policies
    */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namenode.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namenode.getStoragePolicies();
   }
 
   /**

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -506,9 +506,9 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /** Get all the existing storage policies */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     statistics.incrementReadOps(1);
-    return dfs.getStoragePolicySuite();
+    return dfs.getStoragePolicies();
   }
 
   /**
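For client code, the rename is mechanical. A minimal sketch of the renamed public API in use, assuming fs.defaultFS points at an HDFS cluster (the class name ListPoliciesExample is illustrative, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class ListPoliciesExample {
  public static void main(String[] args) throws Exception {
    // Picks up core-site.xml/hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      System.err.println("fs.defaultFS does not point at HDFS");
      return;
    }
    // One getStoragePolicies RPC to the NameNode, counted as a
    // read operation in the FileSystem statistics (see above).
    BlockStoragePolicy[] policies =
        ((DistributedFileSystem) fs).getStoragePolicies();
    for (BlockStoragePolicy policy : policies) {
      System.out.println(policy);
    }
  }
}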

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java

@@ -209,7 +209,7 @@ public class BlockStoragePolicy {
     return getClass().getSimpleName() + "{" + name + ":" + id
         + ", storageTypes=" + Arrays.asList(storageTypes)
         + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
-        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks);
+        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
   }
 
   public byte getId() {
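Ride-along fix: toString() opened the policy description with "{" but never emitted the closing "}". The expected strings in TestBlockStoragePolicy at the end of this commit change to match the corrected output.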

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -43,13 +43,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventsList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -264,7 +262,7 @@ public interface ClientProtocol {
    * @return All the in-use block storage policies currently.
    */
   @Idempotent
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException;
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException;
 
   /**
    * Set the storage policy for a file/directory

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -120,8 +120,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1433,13 +1433,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetStoragePolicySuiteResponseProto getStoragePolicySuite(
-      RpcController controller, GetStoragePolicySuiteRequestProto request)
+  public GetStoragePoliciesResponseProto getStoragePolicies(
+      RpcController controller, GetStoragePoliciesRequestProto request)
       throws ServiceException {
     try {
-      BlockStoragePolicy[] policies = server.getStoragePolicySuite();
-      GetStoragePolicySuiteResponseProto.Builder builder =
-          GetStoragePolicySuiteResponseProto.newBuilder();
+      BlockStoragePolicy[] policies = server.getStoragePolicies();
+      GetStoragePoliciesResponseProto.Builder builder =
+          GetStoragePoliciesResponseProto.newBuilder();
       if (policies == null) {
         return builder.build();
       }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -118,8 +118,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
@@ -226,9 +226,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
       GetDataEncryptionKeyRequestProto.newBuilder().build();
 
-  private final static GetStoragePolicySuiteRequestProto
-      VOID_GET_STORAGE_POLICY_SUITE_REQUEST =
-      GetStoragePolicySuiteRequestProto.newBuilder().build();
+  private final static GetStoragePoliciesRequestProto
+      VOID_GET_STORAGE_POLICIES_REQUEST =
+      GetStoragePoliciesRequestProto.newBuilder().build();
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
@@ -1456,10 +1456,10 @@
   }
 
   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     try {
-      GetStoragePolicySuiteResponseProto response = rpcProxy
-          .getStoragePolicySuite(null, VOID_GET_STORAGE_POLICY_SUITE_REQUEST);
+      GetStoragePoliciesResponseProto response = rpcProxy
+          .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
       return PBHelper.convertStoragePolicies(response.getPoliciesList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -402,7 +402,7 @@ public class BlockManager {
     return storagePolicySuite.getPolicy(policyName);
   }
 
-  public BlockStoragePolicy[] getStoragePolicySuite() {
+  public BlockStoragePolicy[] getStoragePolicies() {
     return storagePolicySuite.getAllPolicies();
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java

@@ -142,7 +142,7 @@ public class Mover {
   private void initStoragePolicies() throws IOException {
     BlockStoragePolicy[] policies = dispatcher.getDistributedFileSystem()
-        .getStoragePolicySuite();
+        .getStoragePolicies();
     for (BlockStoragePolicy policy : policies) {
       this.blockStoragePolicies[policy.getId()] = policy;
     }
@@ -387,8 +387,8 @@
     boolean scheduleMoveReplica(DBlock db, MLocation ml,
         List<StorageType> targetTypes) {
       final Source source = storages.getSource(ml);
-      return source == null ? false : scheduleMoveReplica(db,
-          storages.getSource(ml), targetTypes);
+      return source == null ? false : scheduleMoveReplica(db, source,
+          targetTypes);
     }
 
     boolean scheduleMoveReplica(DBlock db, Source source,
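The second hunk also picks up a small cleanup: the null-guarded overload now reuses the Source it already fetched instead of calling storages.getSource(ml) a second time.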

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2304,13 +2304,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /**
    * @return All the existing block storage policies
    */
-  BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return blockManager.getStoragePolicySuite();
+      return blockManager.getStoragePolicies();
     } finally {
       readUnlock();
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -594,8 +594,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namesystem.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namesystem.getStoragePolicies();
   }
 
   @Override // ClientProtocol

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -614,7 +614,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("The storage policy of " + argv[1] + " is unspecified");
       return 0;
     }
-    BlockStoragePolicy[] policies = dfs.getStoragePolicySuite();
+    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
     for (BlockStoragePolicy p : policies) {
       if (p.getId() == storagePolicyId) {
         System.out.println("The storage policy of " + argv[1] + ":\n" + p);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java (new file)

@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+
+/**
+ * A tool listing all the existing block storage policies. No argument is
+ * required when using this tool.
+ */
+public class GetStoragePolicies extends Configured implements Tool {
+
+  @Override
+  public int run(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(getConf());
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("GetStoragePolicies can only be used against HDFS. " +
+          "Please check the default FileSystem setting in your configuration.");
+      return 1;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    try {
+      BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+      System.out.println("Block Storage Policies:");
+      for (BlockStoragePolicy policy : policies) {
+        if (policy != null) {
+          System.out.println("\t" + policy);
+        }
+      }
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("GetStoragePolicies: " + content[0]);
+      return 1;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int rc = ToolRunner.run(new GetStoragePolicies(), args);
+    System.exit(rc);
+  }
+}
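Because the tool extends Configured and is launched through ToolRunner, the standard generic options are parsed before run() is invoked, so it can be pointed at another cluster without code changes; the config path below is illustrative:

  $ hdfs storagepolicies -conf /path/to/alternate/hdfs-site.xml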

hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -108,10 +108,10 @@ message SetStoragePolicyRequestProto {
 message SetStoragePolicyResponseProto { // void response
 }
 
-message GetStoragePolicySuiteRequestProto { // void request
+message GetStoragePoliciesRequestProto { // void request
 }
 
-message GetStoragePolicySuiteResponseProto {
+message GetStoragePoliciesResponseProto {
   repeated BlockStoragePolicyProto policies = 1;
 }
 
@@ -706,8 +706,8 @@
       returns(SetReplicationResponseProto);
   rpc setStoragePolicy(SetStoragePolicyRequestProto)
       returns(SetStoragePolicyResponseProto);
-  rpc getStoragePolicySuite(GetStoragePolicySuiteRequestProto)
-      returns(GetStoragePolicySuiteResponseProto);
+  rpc getStoragePolicies(GetStoragePoliciesRequestProto)
+      returns(GetStoragePoliciesResponseProto);
   rpc setPermission(SetPermissionRequestProto)
       returns(SetPermissionResponseProto);
   rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -73,11 +73,14 @@ public class TestBlockStoragePolicy {
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]");
+        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+            "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]");
+        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
+            "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]");
+        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+            "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1102,7 +1105,7 @@ public class TestBlockStoragePolicy {
     cluster.waitActive();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      BlockStoragePolicy[] policies = fs.getStoragePolicySuite();
+      BlockStoragePolicy[] policies = fs.getStoragePolicies();
       Assert.assertEquals(3, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());