HDFS-7140. Add a tool to list all the existing block storage policies. Contributed by Jing Zhao.

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
	hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
Author:    Jing Zhao (2014-09-24 19:11:16 -07:00)
Committer: Tsz-Wo Nicholas Sze
Parent:    f83096d49c
Commit:    87f07e67b1
17 changed files with 118 additions and 41 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -703,6 +703,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7081. Add new DistributedFileSystem API for getting all the existing
     storage policies. (jing9)
 
+    HDFS-7140. Add a tool to list all the existing block storage policies.
+    (jing9)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -64,6 +64,7 @@ function print_usage(){
   echo "  nfs3                 run an NFS version 3 gateway"
   echo "  cacheadmin           configure the HDFS cache"
   echo "  crypto               configure HDFS encryption zones"
+  echo "  storagepolicies      get all the existing block storage policies"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -164,6 +165,8 @@ elif [ "$COMMAND" = "balancer" ] ; then
 elif [ "$COMMAND" = "mover" ] ; then
   CLASS=org.apache.hadoop.hdfs.server.mover.Mover
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+elif [ "$COMMAND" = "storagepolicies" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
 elif [ "$COMMAND" = "jmxget" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.JMXGet
 elif [ "$COMMAND" = "oiv" ] ; then
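
With the dispatch above in place, the new subcommand takes no arguments. A hypothetical session for illustration (the header and tab-indented lines come from GetStoragePolicies below, and the three policy strings match the defaults asserted in TestBlockStoragePolicy at the end of this commit; the exact policy set can differ by release):

    $ hdfs storagepolicies
    Block Storage Policies:
            BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]}
            BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}
            BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}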

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd

@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -155,6 +155,10 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
   goto :eof
 
+:storagepolicies
+  set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
 if "%1" == "--config" (
@@ -204,6 +208,7 @@ goto :eof
   @echo                        Use -help to see options
   @echo   cacheadmin           configure the HDFS cache
   @echo   mover                run a utility to move block replicas across storage types
+  @echo   storagepolicies      get all the existing block storage policies
   @echo.
   @echo Most commands print help when invoked w/o parameters.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -1783,8 +1783,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * @return All the existing storage policies
    */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namenode.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namenode.getStoragePolicies();
   }
 
   /**

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -506,9 +506,9 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /** Get all the existing storage policies */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     statistics.incrementReadOps(1);
-    return dfs.getStoragePolicySuite();
+    return dfs.getStoragePolicies();
   }
 
   /**
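
For application code, the renamed public API is all that is needed to enumerate policies. A minimal sketch, assuming fs.defaultFS points at an HDFS cluster (the class name ListPoliciesExample is hypothetical and not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

    public class ListPoliciesExample {  // hypothetical example class
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // reads core-site.xml / hdfs-site.xml
        FileSystem fs = FileSystem.get(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          System.err.println("default filesystem is not HDFS");
          return;
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
          System.out.println(policy);               // uses the toString() fixed below
        }
      }
    }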

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java

@@ -209,7 +209,7 @@ public class BlockStoragePolicy {
     return getClass().getSimpleName() + "{" + name + ":" + id
         + ", storageTypes=" + Arrays.asList(storageTypes)
         + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
-        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks);
+        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
   }
 
   public byte getId() {
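
The one-character change above closes the brace opened by "{" at the start of toString(); before the fix a policy printed with an unbalanced brace, afterwards it is well formed:

    before: BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]
    after:  BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}

The expected strings in TestBlockStoragePolicy at the end of this commit are updated accordingly.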

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -43,13 +43,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventsList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -264,7 +262,7 @@ public interface ClientProtocol {
    * @return All the in-use block storage policies currently.
    */
   @Idempotent
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException;
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException;
 
   /**
    * Set the storage policy for a file/directory

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -120,8 +120,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1433,13 +1433,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetStoragePolicySuiteResponseProto getStoragePolicySuite(
-      RpcController controller, GetStoragePolicySuiteRequestProto request)
+  public GetStoragePoliciesResponseProto getStoragePolicies(
+      RpcController controller, GetStoragePoliciesRequestProto request)
       throws ServiceException {
     try {
-      BlockStoragePolicy[] policies = server.getStoragePolicySuite();
-      GetStoragePolicySuiteResponseProto.Builder builder =
-          GetStoragePolicySuiteResponseProto.newBuilder();
+      BlockStoragePolicy[] policies = server.getStoragePolicies();
+      GetStoragePoliciesResponseProto.Builder builder =
+          GetStoragePoliciesResponseProto.newBuilder();
       if (policies == null) {
         return builder.build();
       }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -118,8 +118,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
@@ -226,9 +226,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
           GetDataEncryptionKeyRequestProto.newBuilder().build();
 
-  private final static GetStoragePolicySuiteRequestProto
-      VOID_GET_STORAGE_POLICY_SUITE_REQUEST =
-          GetStoragePolicySuiteRequestProto.newBuilder().build();
+  private final static GetStoragePoliciesRequestProto
+      VOID_GET_STORAGE_POLICIES_REQUEST =
+          GetStoragePoliciesRequestProto.newBuilder().build();
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
@@ -1456,10 +1456,10 @@
   }
 
   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     try {
-      GetStoragePolicySuiteResponseProto response = rpcProxy
-          .getStoragePolicySuite(null, VOID_GET_STORAGE_POLICY_SUITE_REQUEST);
+      GetStoragePoliciesResponseProto response = rpcProxy
+          .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
       return PBHelper.convertStoragePolicies(response.getPoliciesList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
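
Design note: the request message carries no fields, so the translator caches a single immutable empty instance (VOID_GET_STORAGE_POLICIES_REQUEST) and reuses it on every call rather than building a new proto per RPC, mirroring the existing VOID_GET_DATA_ENCRYPTIONKEY_REQUEST just above it.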

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -402,7 +402,7 @@ public class BlockManager {
     return storagePolicySuite.getPolicy(policyName);
   }
 
-  public BlockStoragePolicy[] getStoragePolicySuite() {
+  public BlockStoragePolicy[] getStoragePolicies() {
     return storagePolicySuite.getAllPolicies();
   }
 

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java

@@ -142,7 +142,7 @@ public class Mover {
 
   private void initStoragePolicies() throws IOException {
     BlockStoragePolicy[] policies = dispatcher.getDistributedFileSystem()
-        .getStoragePolicySuite();
+        .getStoragePolicies();
     for (BlockStoragePolicy policy : policies) {
       this.blockStoragePolicies[policy.getId()] = policy;
     }
@@ -387,8 +387,8 @@ public class Mover {
     boolean scheduleMoveReplica(DBlock db, MLocation ml,
         List<StorageType> targetTypes) {
       final Source source = storages.getSource(ml);
-      return source == null ? false : scheduleMoveReplica(db,
-          storages.getSource(ml), targetTypes);
+      return source == null ? false : scheduleMoveReplica(db, source,
+          targetTypes);
     }
 
     boolean scheduleMoveReplica(DBlock db, Source source,

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2307,13 +2307,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /**
    * @return All the existing block storage policies
    */
-  BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return blockManager.getStoragePolicySuite();
+      return blockManager.getStoragePolicies();
     } finally {
       readUnlock();
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -589,8 +589,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namesystem.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namesystem.getStoragePolicies();
   }
 
   @Override // ClientProtocol

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -620,7 +620,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("The storage policy of " + argv[1] + " is unspecified");
       return 0;
     }
-    BlockStoragePolicy[] policies = dfs.getStoragePolicySuite();
+    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
     for (BlockStoragePolicy p : policies) {
       if (p.getId() == storagePolicyId) {
         System.out.println("The storage policy of " + argv[1] + ":\n" + p);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java (new file)

@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+
+/**
+ * A tool listing all the existing block storage policies. No argument is
+ * required when using this tool.
+ */
+public class GetStoragePolicies extends Configured implements Tool {
+
+  @Override
+  public int run(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(getConf());
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("GetStoragePolicies can only be used against HDFS. " +
+          "Please check the default FileSystem setting in your configuration.");
+      return 1;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    try {
+      BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+      System.out.println("Block Storage Policies:");
+      for (BlockStoragePolicy policy : policies) {
+        if (policy != null) {
+          System.out.println("\t" + policy);
+        }
+      }
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("GetStoragePolicies: " + content[0]);
+      return 1;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int rc = ToolRunner.run(new GetStoragePolicies(), args);
+    System.exit(rc);
+  }
+}
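
A usage note on the ToolRunner wiring: ToolRunner.run() passes the arguments through GenericOptionsParser before invoking run(), so the standard generic Hadoop options still apply even though the tool itself accepts none, e.g. (hypothetical NameNode address):

    hdfs storagepolicies -fs hdfs://nn.example.com:8020

The exit status is 0 on success and 1 when the default filesystem is not HDFS or the policy fetch fails, as coded in run() above.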

hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -108,10 +108,10 @@ message SetStoragePolicyRequestProto {
 message SetStoragePolicyResponseProto { // void response
 }
 
-message GetStoragePolicySuiteRequestProto { // void request
+message GetStoragePoliciesRequestProto { // void request
 }
 
-message GetStoragePolicySuiteResponseProto {
+message GetStoragePoliciesResponseProto {
   repeated BlockStoragePolicyProto policies = 1;
 }
 
@@ -706,8 +706,8 @@ service ClientNamenodeProtocol {
       returns(SetReplicationResponseProto);
   rpc setStoragePolicy(SetStoragePolicyRequestProto)
       returns(SetStoragePolicyResponseProto);
-  rpc getStoragePolicySuite(GetStoragePolicySuiteRequestProto)
-      returns(GetStoragePolicySuiteResponseProto);
+  rpc getStoragePolicies(GetStoragePoliciesRequestProto)
+      returns(GetStoragePoliciesResponseProto);
   rpc setPermission(SetPermissionRequestProto)
       returns(SetPermissionResponseProto);
   rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -73,11 +73,14 @@ public class TestBlockStoragePolicy {
   public void testDefaultPolicies() {
     final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]");
+        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+            "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]");
+        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
+            "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]");
+        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+            "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1102,7 +1105,7 @@ public class TestBlockStoragePolicy {
     cluster.waitActive();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      BlockStoragePolicy[] policies = fs.getStoragePolicySuite();
+      BlockStoragePolicy[] policies = fs.getStoragePolicies();
       Assert.assertEquals(3, policies.length);
      Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());