HDFS-7323. Move the get/setStoragePolicy commands out from dfsadmin. (jing9 via yliu)

yliu 2015-01-12 00:35:43 +08:00
parent ef3c3a832c
commit 5b0d060d2a
7 changed files with 372 additions and 156 deletions
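In short: the -setStoragePolicy and -getStoragePolicy subcommands are removed from hdfs dfsadmin, the old GetStoragePolicies tool is deleted, and both are replaced by a new StoragePolicyAdmin tool that the "hdfs storagepolicies" entry point now dispatches to. As a rough illustration of the new command surface (the path and policy name below are placeholder values borrowed from the test in this patch, and a running cluster is assumed):

    # List every block storage policy the NameNode knows about
    hdfs storagepolicies -listPolicies

    # Apply the WARM policy to a directory
    hdfs storagepolicies -setStoragePolicy -path /foo -policy WARM

    # Read back the policy that now applies to that path
    hdfs storagepolicies -getStoragePolicy -path /foo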

View File

@@ -487,6 +487,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7182. JMX metrics aren't accessible when NN is busy. (Ming Ma via jing9)
 
+    HDFS-7323. Move the get/setStoragePolicy commands out from dfsadmin.
+    (jing9 via yliu)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

View File

@@ -49,7 +49,7 @@ function hadoop_usage
 echo " secondarynamenode run the DFS secondary namenode"
 echo " snapshotDiff diff two snapshots of a directory or diff the"
 echo " current directory contents with a snapshot"
-echo " storagepolicies get all the existing block storage policies"
+echo " storagepolicies list/get/set block storage policies"
 echo " version print the version"
 echo " zkfc run the ZK Failover Controller daemon"
 echo ""
@@ -228,7 +228,7 @@ case ${COMMAND} in
 CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
 ;;
 storagepolicies)
-CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
 ;;
 version)
 CLASS=org.apache.hadoop.util.VersionInfo
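Note one behavioral consequence of the re-wiring above: the old GetStoragePolicies class took no arguments and simply printed all policies, whereas StoragePolicyAdmin expects an explicit subcommand, so the bare invocation changes meaning. A before/after sketch, assuming the hdfs wrapper script is on the PATH:

    # Before this patch: listing was the only behavior, no arguments needed
    hdfs storagepolicies

    # After this patch: listing needs the -listPolicies subcommand;
    # a bare invocation now just prints usage and exits non-zero
    hdfs storagepolicies -listPolicies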

View File

@@ -160,7 +160,7 @@ goto :eof
 goto :eof
 :storagepolicies
-set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+set CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
 goto :eof
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
@@ -216,7 +216,7 @@ goto :eof
 @echo Use -help to see options
 @echo cacheadmin configure the HDFS cache
 @echo mover run a utility to move block replicas across storage types
-@echo storagepolicies get all the existing block storage policies
+@echo storagepolicies list/get/set block storage policies
 @echo.
 @echo Most commands print help when invoked w/o parameters.

View File

@@ -52,7 +52,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -68,10 +67,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.GenericRefreshProtocol;
@@ -398,8 +395,6 @@ public class DFSAdmin extends FsShell {
     "\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
     "\t[-getDatanodeInfo <datanode_host:ipc_port>]\n" +
     "\t[-metasave filename]\n" +
-    "\t[-setStoragePolicy path policyName]\n" +
-    "\t[-getStoragePolicy path]\n" +
     "\t[-triggerBlockReport [-incremental] <datanode_host:ipc_port>]\n" +
     "\t[-help [cmd]]\n";
@@ -608,35 +603,6 @@ public class DFSAdmin extends FsShell {
     return inSafeMode;
   }
 
-  public int setStoragePolicy(String[] argv) throws IOException {
-    DistributedFileSystem dfs = getDFS();
-    dfs.setStoragePolicy(new Path(argv[1]), argv[2]);
-    System.out.println("Set storage policy " + argv[2] + " on " + argv[1]);
-    return 0;
-  }
-
-  public int getStoragePolicy(String[] argv) throws IOException {
-    DistributedFileSystem dfs = getDFS();
-    HdfsFileStatus status = dfs.getClient().getFileInfo(argv[1]);
-    if (status == null) {
-      throw new FileNotFoundException("File/Directory does not exist: "
-          + argv[1]);
-    }
-    byte storagePolicyId = status.getStoragePolicy();
-    if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
-      System.out.println("The storage policy of " + argv[1] + " is unspecified");
-      return 0;
-    }
-    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
-    for (BlockStoragePolicy p : policies) {
-      if (p.getId() == storagePolicyId) {
-        System.out.println("The storage policy of " + argv[1] + ":\n" + p);
-        return 0;
-      }
-    }
-    throw new IOException("Cannot identify the storage policy for " + argv[1]);
-  }
-
   public int triggerBlockReport(String[] argv) throws IOException {
     List<String> args = new LinkedList<String>();
     for (int j = 1; j < argv.length; j++) {
@@ -1016,12 +982,6 @@ public class DFSAdmin extends FsShell {
         + "\tGet the information about the given datanode. This command can\n"
         + "\tbe used for checking if a datanode is alive.\n";
 
-    String setStoragePolicy = "-setStoragePolicy path policyName\n"
-        + "\tSet the storage policy for a file/directory.\n";
-
-    String getStoragePolicy = "-getStoragePolicy path\n"
-        + "\tGet the storage policy for a file/directory.\n";
-
     String triggerBlockReport =
       "-triggerBlockReport [-incremental] <datanode_host:ipc_port>\n"
         + "\tTrigger a block report for the datanode.\n"
@@ -1087,10 +1047,6 @@ public class DFSAdmin extends FsShell {
       System.out.println(shutdownDatanode);
     } else if ("getDatanodeInfo".equalsIgnoreCase(cmd)) {
       System.out.println(getDatanodeInfo);
-    } else if ("setStoragePolicy".equalsIgnoreCase(cmd)) {
-      System.out.println(setStoragePolicy);
-    } else if ("getStoragePolicy".equalsIgnoreCase(cmd)) {
-      System.out.println(getStoragePolicy);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -1123,8 +1079,6 @@ public class DFSAdmin extends FsShell {
     System.out.println(disallowSnapshot);
     System.out.println(shutdownDatanode);
     System.out.println(getDatanodeInfo);
-    System.out.println(setStoragePolicy);
-    System.out.println(getStoragePolicy);
     System.out.println(triggerBlockReport);
     System.out.println(help);
     System.out.println();
@@ -1555,12 +1509,6 @@ public class DFSAdmin extends FsShell {
     } else if ("-safemode".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-safemode enter | leave | get | wait]");
-    } else if ("-setStoragePolicy".equals(cmd)) {
-      System.err.println("Usage: java DFSAdmin"
-          + " [-setStoragePolicy path policyName]");
-    } else if ("-getStoragePolicy".equals(cmd)) {
-      System.err.println("Usage: java DFSAdmin"
-          + " [-getStoragePolicy path]");
     } else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-allowSnapshot <snapshotDir>]");
@@ -1780,21 +1728,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
-    } else if ("-setStoragePolicy".equals(cmd)) {
-      if (argv.length != 3) {
-        printUsage(cmd);
-        return exitCode;
-      }
     } else if ("-triggerBlockReport".equals(cmd)) {
       if (argv.length < 1) {
         printUsage(cmd);
         return exitCode;
       }
-    } else if ("-getStoragePolicy".equals(cmd)) {
-      if (argv.length != 2) {
-        printUsage(cmd);
-        return exitCode;
-      }
     }
 
     // initialize DFSAdmin
@@ -1868,10 +1806,6 @@ public class DFSAdmin extends FsShell {
       exitCode = getDatanodeInfo(argv, i);
     } else if ("-reconfig".equals(cmd)) {
       exitCode = reconfig(argv, i);
-    } else if ("-setStoragePolicy".equals(cmd)) {
-      exitCode = setStoragePolicy(argv);
-    } else if ("-getStoragePolicy".equals(cmd)) {
-      exitCode = getStoragePolicy(argv);
     } else if ("-triggerBlockReport".equals(cmd)) {
       exitCode = triggerBlockReport(argv);
     } else if ("-help".equals(cmd)) {

View File

@@ -1,65 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.IOException;
/**
* A tool listing all the existing block storage policies. No argument is
* required when using this tool.
*/
public class GetStoragePolicies extends Configured implements Tool {
@Override
public int run(String[] args) throws Exception {
FileSystem fs = FileSystem.get(getConf());
if (!(fs instanceof DistributedFileSystem)) {
System.err.println("GetStoragePolicies can only be used against HDFS. " +
"Please check the default FileSystem setting in your configuration.");
return 1;
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;
try {
BlockStoragePolicy[] policies = dfs.getStoragePolicies();
System.out.println("Block Storage Policies:");
for (BlockStoragePolicy policy : policies) {
if (policy != null) {
System.out.println("\t" + policy);
}
}
} catch (IOException e) {
String[] content = e.getLocalizedMessage().split("\n");
System.err.println("GetStoragePolicies: " + content[0]);
return 1;
}
return 0;
}
public static void main(String[] args) throws Exception {
int rc = ToolRunner.run(new GetStoragePolicies(), args);
System.exit(rc);
}
}

View File

@@ -0,0 +1,339 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
/**
* This class implements block storage policy operations.
*/
public class StoragePolicyAdmin extends Configured implements Tool {
private static final int MAX_LINE_WIDTH = 80;
public static void main(String[] argsArray) throws Exception {
final StoragePolicyAdmin admin = new StoragePolicyAdmin(new
Configuration());
System.exit(admin.run(argsArray));
}
private static DistributedFileSystem getDFS(Configuration conf)
throws IOException {
final FileSystem fs = FileSystem.get(conf);
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("FileSystem " + fs.getUri() +
" is not an HDFS file system");
}
return (DistributedFileSystem) fs;
}
/**
* NN exceptions contain the stack trace as part of the exception message.
* When it's a known error, pretty-print the error and squish the stack trace.
*/
private static String prettifyException(Exception e) {
return e.getClass().getSimpleName() + ": " +
e.getLocalizedMessage().split("\n")[0];
}
private static TableListing getOptionDescriptionListing() {
return new TableListing.Builder()
.addField("").addField("", true)
.wrapWidth(MAX_LINE_WIDTH).hideHeaders().build();
}
public StoragePolicyAdmin(Configuration conf) {
super(conf);
}
@Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
printUsage(false);
return 1;
}
final Command command = determineCommand(args[0]);
if (command == null) {
System.err.println("Can't understand command '" + args[0] + "'");
if (!args[0].startsWith("-")) {
System.err.println("Command names must start with dashes.");
}
printUsage(false);
return 1;
}
final List<String> argsList = new LinkedList<String>();
argsList.addAll(Arrays.asList(args).subList(1, args.length));
try {
return command.run(getConf(), argsList);
} catch (IllegalArgumentException e) {
System.err.println(prettifyException(e));
return -1;
}
}
interface Command {
String getName();
String getShortUsage();
String getLongUsage();
int run(Configuration conf, List<String> args) throws IOException;
}
/** Command to list all the existing storage policies */
private static class ListStoragePoliciesCommand implements Command {
@Override
public String getName() {
return "-listPolicies";
}
@Override
public String getShortUsage() {
return "[" + getName() + "]\n";
}
@Override
public String getLongUsage() {
return getShortUsage() + "\n" +
"List all the existing block storage policies.\n";
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final DistributedFileSystem dfs = getDFS(conf);
try {
BlockStoragePolicy[] policies = dfs.getStoragePolicies();
System.out.println("Block Storage Policies:");
for (BlockStoragePolicy policy : policies) {
if (policy != null) {
System.out.println("\t" + policy);
}
}
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
}
return 0;
}
}
/** Command to get the storage policy of a file/directory */
private static class GetStoragePolicyCommand implements Command {
@Override
public String getName() {
return "-getStoragePolicy";
}
@Override
public String getShortUsage() {
return "[" + getName() + " -path <path>]\n";
}
@Override
public String getLongUsage() {
final TableListing listing = getOptionDescriptionListing();
listing.addRow("<path>",
"The path of the file/directory for getting the storage policy");
return getShortUsage() + "\n" +
"Get the storage policy of a file/directory.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("Please specify the path with -path.\nUsage:" +
getLongUsage());
return 1;
}
final DistributedFileSystem dfs = getDFS(conf);
try {
HdfsFileStatus status = dfs.getClient().getFileInfo(path);
if (status == null) {
System.err.println("File/Directory does not exist: " + path);
return 2;
}
byte storagePolicyId = status.getStoragePolicy();
if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
System.out.println("The storage policy of " + path + " is unspecified");
return 0;
}
BlockStoragePolicy[] policies = dfs.getStoragePolicies();
for (BlockStoragePolicy p : policies) {
if (p.getId() == storagePolicyId) {
System.out.println("The storage policy of " + path + ":\n" + p);
return 0;
}
}
} catch (Exception e) {
System.err.println(prettifyException(e));
return 2;
}
System.err.println("Cannot identify the storage policy for " + path);
return 2;
}
}
/** Command to set the storage policy to a file/directory */
private static class SetStoragePolicyCommand implements Command {
@Override
public String getName() {
return "-setStoragePolicy";
}
@Override
public String getShortUsage() {
return "[" + getName() + " -path <path> -policy <policy>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = getOptionDescriptionListing();
listing.addRow("<path>", "The path of the file/directory to set storage" +
" policy");
listing.addRow("<policy>", "The name of the block storage policy");
return getShortUsage() + "\n" +
"Set the storage policy to a file/directory.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("Please specify the path for setting the storage " +
"policy.\nUsage: " + getLongUsage());
return 1;
}
final String policyName = StringUtils.popOptionWithArgument("-policy",
args);
if (policyName == null) {
System.err.println("Please specify the policy name.\nUsage: " +
getLongUsage());
return 1;
}
final DistributedFileSystem dfs = getDFS(conf);
try {
dfs.setStoragePolicy(new Path(path), policyName);
System.out.println("Set storage policy " + policyName + " on " + path);
} catch (Exception e) {
System.err.println(prettifyException(e));
return 2;
}
return 0;
}
}
private static class HelpCommand implements Command {
@Override
public String getName() {
return "-help";
}
@Override
public String getShortUsage() {
return "[-help <command-name>]\n";
}
@Override
public String getLongUsage() {
final TableListing listing = getOptionDescriptionListing();
listing.addRow("<command-name>", "The command for which to get " +
"detailed help. If no command is specified, print detailed help for " +
"all commands");
return getShortUsage() + "\n" +
"Get detailed help about a command.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
if (args.size() == 0) {
for (Command command : COMMANDS) {
System.err.println(command.getLongUsage());
}
return 0;
}
if (args.size() != 1) {
System.out.println("You must give exactly one argument to -help.");
return 0;
}
final String commandName = args.get(0);
// prepend a dash to match against the command names
final Command command = determineCommand("-" + commandName);
if (command == null) {
System.err.print("Unknown command '" + commandName + "'.\n");
System.err.print("Valid help command names are:\n");
String separator = "";
for (Command c : COMMANDS) {
System.err.print(separator + c.getName().substring(1));
separator = ", ";
}
System.err.print("\n");
return 1;
}
System.err.print(command.getLongUsage());
return 0;
}
}
private static final Command[] COMMANDS = {
new ListStoragePoliciesCommand(),
new SetStoragePolicyCommand(),
new GetStoragePolicyCommand(),
new HelpCommand()
};
private static void printUsage(boolean longUsage) {
System.err.println(
"Usage: bin/hdfs storagepolicies [COMMAND]");
for (Command command : COMMANDS) {
if (longUsage) {
System.err.print(command.getLongUsage());
} else {
System.err.print(" " + command.getShortUsage());
}
}
System.err.println();
}
private static Command determineCommand(String commandName) {
for (Command COMMAND : COMMANDS) {
if (COMMAND.getName().equals(commandName)) {
return COMMAND;
}
}
return null;
}
}
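For reference, the HelpCommand above matches names after re-adding the leading dash, and run() prints the short usage and returns 1 when no arguments are given. Assuming the tool is reached through the hdfs wrapper added earlier in this patch, that looks roughly like:

    # Detailed help for one command; the name is given without the leading dash
    hdfs storagepolicies -help setStoragePolicy

    # No arguments: the short usage of every command is printed and the exit code is 1
    hdfs storagepolicies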

View File

@@ -15,12 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs;
+package org.apache.hadoop.hdfs.tools;
 
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.junit.After;
@@ -28,7 +32,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 /**
- * Test storage policy related DFSAdmin commands
+ * Test StoragePolicyAdmin commands
  */
 public class TestStoragePolicyCommands {
   private static final short REPL = 1;
@@ -48,10 +52,10 @@ public class TestStoragePolicyCommands {
   @After
   public void clusterShutdown() throws IOException{
-    if(fs != null){
+    if(fs != null) {
       fs.close();
     }
-    if(cluster != null){
+    if(cluster != null) {
       cluster.shutdown();
     }
   }
@@ -62,27 +66,28 @@ public class TestStoragePolicyCommands {
     final Path bar = new Path(foo, "bar");
     DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
 
-    DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo", 0,
-        "The storage policy of " + foo.toString() + " is unspecified", conf);
-    DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo/bar", 0,
-        "The storage policy of " + bar.toString() + " is unspecified", conf);
-
-    DFSTestUtil.DFSAdminRun("-setStoragePolicy /foo WARM", 0,
-        "Set storage policy WARM on " + foo.toString(), conf);
-    DFSTestUtil.DFSAdminRun("-setStoragePolicy /foo/bar COLD", 0,
-        "Set storage policy COLD on " + bar.toString(), conf);
-    DFSTestUtil.DFSAdminRun("-setStoragePolicy /fooz WARM", -1,
-        "File/Directory does not exist: /fooz", conf);
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
+        "The storage policy of " + foo.toString() + " is unspecified");
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
+        "The storage policy of " + bar.toString() + " is unspecified");
+
+    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo -policy WARM", 0,
+        "Set storage policy WARM on " + foo.toString());
+    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar -policy COLD",
+        0, "Set storage policy COLD on " + bar.toString());
+    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /fooz -policy WARM",
+        2, "File/Directory does not exist: /fooz");
 
     final BlockStoragePolicySuite suite = BlockStoragePolicySuite
         .createDefaultSuite();
     final BlockStoragePolicy warm = suite.getPolicy("WARM");
     final BlockStoragePolicy cold = suite.getPolicy("COLD");
 
-    DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo", 0,
-        "The storage policy of " + foo.toString() + ":\n" + warm, conf);
-    DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo/bar", 0,
-        "The storage policy of " + bar.toString() + ":\n" + cold, conf);
-    DFSTestUtil.DFSAdminRun("-getStoragePolicy /fooz", -1,
-        "File/Directory does not exist: /fooz", conf);
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
+        "The storage policy of " + foo.toString() + ":\n" + warm);
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
+        "The storage policy of " + bar.toString() + ":\n" + cold);
+    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
+        "File/Directory does not exist: /fooz");
   }
 }
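The updated test drives StoragePolicyAdmin in-process through DFSTestUtil.toolRun against a MiniDFSCluster, so it can be run on its own. A possible local invocation, assuming a standard Maven/surefire setup in the hadoop-hdfs module:

    # Run only this test class from the hadoop-hdfs module directory
    mvn test -Dtest=TestStoragePolicyCommands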