HDFS-7140. Add a tool to list all the existing block storage policies. Contributed by Jing Zhao.
Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
	hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
parent f83096d49c
commit 87f07e67b1
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -703,6 +703,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7081. Add new DistributedFileSystem API for getting all the existing
     storage policies. (jing9)

+    HDFS-7140. Add a tool to list all the existing block storage policies.
+    (jing9)
+
 Release 2.5.1 - 2014-09-05

   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -64,6 +64,7 @@ function print_usage(){
   echo "  nfs3                 run an NFS version 3 gateway"
   echo "  cacheadmin           configure the HDFS cache"
   echo "  crypto               configure HDFS encryption zones"
+  echo "  storagepolicies      get all the existing block storage policies"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -164,6 +165,8 @@ elif [ "$COMMAND" = "balancer" ] ; then
 elif [ "$COMMAND" = "mover" ] ; then
   CLASS=org.apache.hadoop.hdfs.server.mover.Mover
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
 elif [ "$COMMAND" = "jmxget" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.JMXGet
 elif [ "$COMMAND" = "oiv" ] ; then
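With this dispatch in place, the new subcommand takes no arguments. A representative transcript (the header and policy format come from the GetStoragePolicies tool and the test expectations at the bottom of this commit; the exact policy set depends on the cluster) might look like:

    $ hdfs storagepolicies
    Block Storage Policies:
        BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]}
        BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}
        BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}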
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )

-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -155,6 +155,10 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
   goto :eof

+:storagepolicies
+  set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
 if "%1" == "--config" (
@@ -204,6 +208,7 @@ goto :eof
   @echo   Use -help to see options
   @echo   cacheadmin           configure the HDFS cache
   @echo   mover                run a utility to move block replicas across storage types
+  @echo   storagepolicies      get all the existing block storage policies
   @echo.
   @echo Most commands print help when invoked w/o parameters.

DFSClient.java
@@ -1783,8 +1783,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * @return All the existing storage policies
    */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namenode.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namenode.getStoragePolicies();
   }

   /**
DistributedFileSystem.java
@@ -506,9 +506,9 @@ public class DistributedFileSystem extends FileSystem {
   }

   /** Get all the existing storage policies */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     statistics.incrementReadOps(1);
-    return dfs.getStoragePolicySuite();
+    return dfs.getStoragePolicies();
   }

   /**
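For reference, a minimal client-side sketch of the renamed API (a hypothetical example class, not part of this commit; it assumes fs.defaultFS points at an HDFS cluster, and the instanceof check mirrors what the new GetStoragePolicies tool does below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

    public class ListStoragePoliciesExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        if (fs instanceof DistributedFileSystem) {
          // Renamed in this commit from getStoragePolicySuite().
          BlockStoragePolicy[] policies =
              ((DistributedFileSystem) fs).getStoragePolicies();
          for (BlockStoragePolicy policy : policies) {
            System.out.println(policy);
          }
        }
      }
    }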
BlockStoragePolicy.java
@@ -209,7 +209,7 @@ public class BlockStoragePolicy {
     return getClass().getSimpleName() + "{" + name + ":" + id
         + ", storageTypes=" + Arrays.asList(storageTypes)
         + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
-        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks);
+        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
   }

   public byte getId() {
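This hunk fixes an unbalanced brace in toString(): the string opens with "{" after the class name but never appended a closing "}", so a policy printed as, e.g., BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE] with no terminator. The expectations in TestBlockStoragePolicy at the bottom of this commit are updated to match the balanced form.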
ClientProtocol.java
@@ -43,13 +43,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.inotify.Event;
-import org.apache.hadoop.hdfs.inotify.EventsList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -264,7 +262,7 @@ public interface ClientProtocol {
    * @return All the in-use block storage policies currently.
    */
   @Idempotent
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException;
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException;

   /**
    * Set the storage policy for a file/directory
ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -120,8 +120,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1433,13 +1433,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }

   @Override
-  public GetStoragePolicySuiteResponseProto getStoragePolicySuite(
-      RpcController controller, GetStoragePolicySuiteRequestProto request)
+  public GetStoragePoliciesResponseProto getStoragePolicies(
+      RpcController controller, GetStoragePoliciesRequestProto request)
       throws ServiceException {
     try {
-      BlockStoragePolicy[] policies = server.getStoragePolicySuite();
-      GetStoragePolicySuiteResponseProto.Builder builder =
-          GetStoragePolicySuiteResponseProto.newBuilder();
+      BlockStoragePolicy[] policies = server.getStoragePolicies();
+      GetStoragePoliciesResponseProto.Builder builder =
+          GetStoragePoliciesResponseProto.newBuilder();
       if (policies == null) {
         return builder.build();
       }
ClientNamenodeProtocolTranslatorPB.java
@@ -118,8 +118,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
@@ -226,9 +226,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
       GetDataEncryptionKeyRequestProto.newBuilder().build();

-  private final static GetStoragePolicySuiteRequestProto
-      VOID_GET_STORAGE_POLICY_SUITE_REQUEST =
-      GetStoragePolicySuiteRequestProto.newBuilder().build();
+  private final static GetStoragePoliciesRequestProto
+      VOID_GET_STORAGE_POLICIES_REQUEST =
+      GetStoragePoliciesRequestProto.newBuilder().build();

   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
@@ -1456,10 +1456,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }

   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     try {
-      GetStoragePolicySuiteResponseProto response = rpcProxy
-          .getStoragePolicySuite(null, VOID_GET_STORAGE_POLICY_SUITE_REQUEST);
+      GetStoragePoliciesResponseProto response = rpcProxy
+          .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
       return PBHelper.convertStoragePolicies(response.getPoliciesList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
BlockManager.java
@@ -402,7 +402,7 @@ public class BlockManager {
     return storagePolicySuite.getPolicy(policyName);
   }

-  public BlockStoragePolicy[] getStoragePolicySuite() {
+  public BlockStoragePolicy[] getStoragePolicies() {
     return storagePolicySuite.getAllPolicies();
   }

Mover.java
@@ -142,7 +142,7 @@ public class Mover {

   private void initStoragePolicies() throws IOException {
     BlockStoragePolicy[] policies = dispatcher.getDistributedFileSystem()
-        .getStoragePolicySuite();
+        .getStoragePolicies();
     for (BlockStoragePolicy policy : policies) {
       this.blockStoragePolicies[policy.getId()] = policy;
     }
@@ -387,8 +387,8 @@ public class Mover {
   boolean scheduleMoveReplica(DBlock db, MLocation ml,
       List<StorageType> targetTypes) {
     final Source source = storages.getSource(ml);
-    return source == null ? false : scheduleMoveReplica(db,
-        storages.getSource(ml), targetTypes);
+    return source == null ? false : scheduleMoveReplica(db, source,
+        targetTypes);
   }

   boolean scheduleMoveReplica(DBlock db, Source source,
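The second Mover hunk is a small cleanup rather than part of the rename: the original code computed storages.getSource(ml), null-checked it, and then called storages.getSource(ml) a second time when delegating; the fix reuses the already-computed source.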
FSNamesystem.java
@@ -2307,13 +2307,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /**
    * @return All the existing block storage policies
    */
-  BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return blockManager.getStoragePolicySuite();
+      return blockManager.getStoragePolicies();
     } finally {
       readUnlock();
     }
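Note the pattern around the read lock: checkOperation(OperationCategory.READ) runs once before readLock() to fail fast on a standby NameNode without contending for the lock, and again inside the lock because the HA state can change while the thread waits to acquire it.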
NameNodeRpcServer.java
@@ -589,8 +589,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namesystem.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namesystem.getStoragePolicies();
   }

   @Override // ClientProtocol
DFSAdmin.java
@@ -620,7 +620,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("The storage policy of " + argv[1] + " is unspecified");
       return 0;
     }
-    BlockStoragePolicy[] policies = dfs.getStoragePolicySuite();
+    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
     for (BlockStoragePolicy p : policies) {
       if (p.getId() == storagePolicyId) {
         System.out.println("The storage policy of " + argv[1] + ":\n" + p);
GetStoragePolicies.java (new file)
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+
+/**
+ * A tool listing all the existing block storage policies. No argument is
+ * required when using this tool.
+ */
+public class GetStoragePolicies extends Configured implements Tool {
+
+  @Override
+  public int run(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(getConf());
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("GetStoragePolicies can only be used against HDFS. " +
+          "Please check the default FileSystem setting in your configuration.");
+      return 1;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    try {
+      BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+      System.out.println("Block Storage Policies:");
+      for (BlockStoragePolicy policy : policies) {
+        if (policy != null) {
+          System.out.println("\t" + policy);
+        }
+      }
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("GetStoragePolicies: " + content[0]);
+      return 1;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int rc = ToolRunner.run(new GetStoragePolicies(), args);
+    System.exit(rc);
+  }
+}
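The tool can also be driven programmatically. A minimal sketch (a hypothetical driver, not part of this commit; HdfsConfiguration is the stock client-side Configuration subclass):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.GetStoragePolicies;
    import org.apache.hadoop.util.ToolRunner;

    public class RunGetStoragePolicies {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // ToolRunner injects the Configuration into the Tool and parses
        // generic options; GetStoragePolicies itself ignores its argument list.
        int rc = ToolRunner.run(conf, new GetStoragePolicies(), new String[0]);
        System.exit(rc);
      }
    }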
ClientNamenodeProtocol.proto
@@ -108,10 +108,10 @@ message SetStoragePolicyRequestProto {
 message SetStoragePolicyResponseProto { // void response
 }

-message GetStoragePolicySuiteRequestProto { // void request
+message GetStoragePoliciesRequestProto { // void request
 }

-message GetStoragePolicySuiteResponseProto {
+message GetStoragePoliciesResponseProto {
   repeated BlockStoragePolicyProto policies = 1;
 }

@@ -706,8 +706,8 @@ service ClientNamenodeProtocol {
       returns(SetReplicationResponseProto);
   rpc setStoragePolicy(SetStoragePolicyRequestProto)
       returns(SetStoragePolicyResponseProto);
-  rpc getStoragePolicySuite(GetStoragePolicySuiteRequestProto)
-      returns(GetStoragePolicySuiteResponseProto);
+  rpc getStoragePolicies(GetStoragePoliciesRequestProto)
+      returns(GetStoragePoliciesResponseProto);
   rpc setPermission(SetPermissionRequestProto)
       returns(SetPermissionResponseProto);
   rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);
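Renaming the rpc here is a wire-level change: Hadoop's protobuf RPC engine sends the method name in each request header, so a client still calling getStoragePolicySuite would get an unknown-method error from an upgraded NameNode. That is acceptable in this case because the RPC was only introduced by HDFS-7081 in the same unreleased 2.6.0 line, so no shipped client depends on the old name.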
TestBlockStoragePolicy.java
@@ -73,11 +73,14 @@ public class TestBlockStoragePolicy {
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]");
+        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+            "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]");
+        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
+            "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]");
+        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+            "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");

     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1102,7 +1105,7 @@ public class TestBlockStoragePolicy {
     cluster.waitActive();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      BlockStoragePolicy[] policies = fs.getStoragePolicySuite();
+      BlockStoragePolicy[] policies = fs.getStoragePolicies();
       Assert.assertEquals(3, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());