HDFS-5120. Add command-line support for manipulating cache pools.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1521240 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2013-09-09 18:53:01 +00:00
parent 7a74ca3694
commit 3a9cd79e9d
8 changed files with 523 additions and 6 deletions

View File

@ -905,4 +905,81 @@ public class StringUtils {
}
return str.toString();
}
/**
* From a list of command-line arguments, remove both an option and the
* next argument.
*
* @param name Name of the option to remove. Example: -foo.
* @param args List of arguments.
* @return null if the option was not found; the value of the
* option otherwise.
*/
public static String popOptionWithArgument(String name, List<String> args) {
String val = null;
for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
String cur = iter.next();
if (cur.equals("--")) {
// stop parsing arguments when you see --
break;
} else if (cur.equals(name)) {
iter.remove();
if (!iter.hasNext()) {
throw new RuntimeException("option " + name + " requires 1 " +
"argument.");
}
val = iter.next();
iter.remove();
break;
}
}
return val;
}
/**
* From a list of command-line arguments, remove an option.
*
* @param name Name of the option to remove. Example: -foo.
* @param args List of arguments.
* @return true if the option was found and removed; false otherwise.
*/
public static boolean popOption(String name, List<String> args) {
for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
String cur = iter.next();
if (cur.equals("--")) {
// stop parsing arguments when you see --
break;
} else if (cur.equals(name)) {
iter.remove();
return true;
}
}
return false;
}
/**
* From a list of command-line arguments, return the first non-option
* argument. Non-option arguments are those which either come after
* a double dash (--) or do not start with a dash.
*
* @param args List of arguments.
* @return The first non-option argument, or null if there were none.
*/
public static String popFirstNonOption(List<String> args) {
for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
String cur = iter.next();
if (cur.equals("--")) {
if (!iter.hasNext()) {
return null;
}
cur = iter.next();
iter.remove();
return cur;
} else if (!cur.startsWith("-")) {
iter.remove();
return cur;
}
}
return null;
}
}
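
As a quick illustration of how the three new helpers compose, here is a hedged sketch. It assumes the methods land in org.apache.hadoop.util.StringUtils as the class above indicates; the demo class name and the argument values are made up for illustration.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.util.StringUtils;

public class PopOptionsDemo {
  public static void main(String[] args) {
    // Mirrors how DFSAdmin consumes its cache pool sub-command arguments.
    List<String> argv = new LinkedList<String>(
        Arrays.asList("-owner", "bob", "-verbose", "pool1", "--", "-literal"));
    String owner = StringUtils.popOptionWithArgument("-owner", argv);   // "bob"
    boolean verbose = StringUtils.popOption("-verbose", argv);          // true
    String name = StringUtils.popFirstNonOption(argv);                  // "pool1"
    System.out.println(owner + " " + verbose + " " + name);
    // argv now holds only ["--", "-literal"]; everything after "--" is left untouched.
  }
}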

View File

@ -27,6 +27,10 @@ HDFS-4949 (Unreleased)
HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin
Patrick McCabe)
HDFS-5120. Add command-line support for manipulating cache pools.
(Contributed by Colin Patrick McCabe)
OPTIMIZATIONS
BUG FIXES

View File

@ -57,6 +57,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@ -1579,4 +1580,51 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absF);
}
/**
* Add a cache pool.
*
* @param info
* Metadata describing the cache pool to add.
* @throws IOException
* If the request could not be completed.
*/
public void addCachePool(CachePoolInfo info) throws IOException {
dfs.namenode.addCachePool(info);
}
/**
* Modify an existing cache pool.
*
* @param info
* The updated metadata for the cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void modifyCachePool(CachePoolInfo info) throws IOException {
dfs.namenode.modifyCachePool(info);
}
/**
* Remove a cache pool.
*
* @param name
* Name of the cache pool to remove.
* @throws IOException
* if the cache pool did not exist, or could not be removed.
*/
public void removeCachePool(String name) throws IOException {
dfs.namenode.removeCachePool(name);
}
/**
* List all cache pools.
*
* @return A remote iterator from which you can get CachePoolInfo objects.
* Requests will be made as needed.
* @throws IOException
* If there was an error listing cache pools.
*/
public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
return dfs.namenode.listCachePools("");
}
}
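
A minimal end-to-end sketch of the new public API, assuming fs.defaultFS points at a NameNode built from this branch; the pool name and attribute values are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolApiDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Create a pool, list everything, then clean up.
    dfs.addCachePool(new CachePoolInfo("pool1")
        .setOwnerName("bob")
        .setGroupName("eng")
        .setMode(new FsPermission((short) 0755))
        .setWeight(150));

    RemoteIterator<CachePoolInfo> it = dfs.listCachePools();
    while (it.hasNext()) {
      System.out.println(it.next().getPoolName());
    }

    dfs.removeCachePool("pool1");
  }
}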

View File

@ -1144,7 +1144,7 @@ public interface ClientProtocol {
public void addCachePool(CachePoolInfo info) throws IOException;
/**
- * Modify a cache pool.
+ * Modify an existing cache pool.
*
* @param req
* The request to modify a cache pool.

View File

@ -254,9 +254,7 @@ final class CacheManager {
public synchronized void addCachePool(CachePoolInfo info)
throws IOException {
String poolName = info.getPoolName();
- if (poolName.isEmpty()) {
-   throw new IOException("invalid empty cache pool name");
- }
+ CachePool.validateName(poolName);
CachePool pool = cachePools.get(poolName);
if (pool != null) {
throw new IOException("cache pool " + poolName + " already exists.");

View File

@ -40,6 +40,8 @@ import org.apache.hadoop.security.UserGroupInformation;
public final class CachePool {
public static final Log LOG = LogFactory.getLog(CachePool.class);
public static final int DEFAULT_WEIGHT = 100;
@Nonnull
private final String poolName;
@ -152,4 +154,13 @@ public final class CachePool {
append(", weight:").append(weight). append(", weight:").append(weight).
append(" }").toString(); append(" }").toString();
} }
public static void validateName(String name) throws IOException {
if (name.isEmpty()) {
// Empty pool names are not allowed because they would be highly
// confusing. They would also break the ability to list all pools
// by starting with prevKey = ""
throw new IOException("invalid empty cache pool name");
}
}
}
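
A small, hypothetical caller of the new check, shown as a fragment rather than a complete program (CachePool lives in org.apache.hadoop.hdfs.server.namenode, so in practice only NameNode-side code such as CacheManager calls it):

try {
  CachePool.validateName("");         // rejected: an empty name would also collide
                                      // with the prevKey = "" convention used for listing
} catch (IOException e) {
  System.err.println(e.getMessage()); // "invalid empty cache pool name"
}
CachePool.validateName("pool1");      // any non-empty name passes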

View File

@ -24,6 +24,7 @@ import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.TreeSet;
@ -36,6 +37,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -44,12 +47,14 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.ipc.RPC;
@ -62,6 +67,8 @@ import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
/**
* This class provides some DFS administrative access shell commands.
*/
@ -455,6 +462,234 @@ public class DFSAdmin extends FsShell {
return exitCode;
}
final private static String ADD_CACHE_POOL_USAGE =
"-addCachePool <name> [-owner <owner>] " +
"[-group <group>] [-mode <mode>] [-weight <weight>]";
public int addCachePool(String argsArray[], int idx) throws IOException {
List<String> args = new LinkedList<String>();
for (int i = idx; i < argsArray.length; i++) {
args.add(argsArray[i]);
}
String owner = StringUtils.popOptionWithArgument("-owner", args);
if (owner == null) {
owner = UserGroupInformation.getCurrentUser().getShortUserName();
}
String group = StringUtils.popOptionWithArgument("-group", args);
if (group == null) {
group = UserGroupInformation.getCurrentUser().getGroupNames()[0];
}
String modeString = StringUtils.popOptionWithArgument("-mode", args);
int mode;
if (modeString == null) {
mode = FsPermission.getCachePoolDefault().toShort();
} else {
mode = Integer.parseInt(modeString, 8);
}
String weightString = StringUtils.popOptionWithArgument("-weight", args);
int weight;
if (weightString == null) {
weight = CachePool.DEFAULT_WEIGHT;
} else {
weight = Integer.parseInt(weightString);
}
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when creating a " +
"cache pool.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + ADD_CACHE_POOL_USAGE);
return 1;
}
DistributedFileSystem dfs = getDFS();
CachePoolInfo info = new CachePoolInfo(name).
setOwnerName(owner).
setGroupName(group).
setMode(new FsPermission((short)mode)).
setWeight(weight);
try {
dfs.addCachePool(info);
} catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Successfully added cache pool " + name + ".");
return 0;
}
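
For reference, a sketch of driving the new sub-command programmatically, the same way the dfsadmin shell script does; it assumes a running NameNode reachable through fs.defaultFS, and the pool name and attribute values are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class AddCachePoolDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Any option left out falls back to the defaults computed above:
    // current user, the user's primary group, the cache pool default mode,
    // and CachePool.DEFAULT_WEIGHT (100).
    int exitCode = ToolRunner.run(conf, new DFSAdmin(),
        new String[] {"-addCachePool", "pool1",
                      "-owner", "bob", "-group", "eng",
                      "-mode", "0755", "-weight", "200"});
    System.exit(exitCode);
  }
}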
final private static String MODIFY_CACHE_POOL_USAGE =
"-modifyCachePool <name> [-owner <owner>] " +
"[-group <group>] [-mode <mode>] [-weight <weight>]";
public int modifyCachePool(String argsArray[], int idx) throws IOException {
List<String> args = new LinkedList<String>();
for (int i = idx; i < argsArray.length; i++) {
args.add(argsArray[i]);
}
String owner = StringUtils.popOptionWithArgument("-owner", args);
String group = StringUtils.popOptionWithArgument("-group", args);
String modeString = StringUtils.popOptionWithArgument("-mode", args);
Integer mode = (modeString == null) ?
null : Integer.parseInt(modeString, 8);
String weightString = StringUtils.popOptionWithArgument("-weight", args);
Integer weight = (weightString == null) ?
null : Integer.parseInt(weightString);
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when creating a " +
"cache pool.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("usage is " + MODIFY_CACHE_POOL_USAGE);
return 1;
}
boolean changed = false;
CachePoolInfo info = new CachePoolInfo(name);
if (owner != null) {
info.setOwnerName(owner);
changed = true;
}
if (group != null) {
info.setGroupName(group);
changed = true;
}
if (mode != null) {
info.setMode(new FsPermission(mode.shortValue()));
changed = true;
}
if (weight != null) {
info.setWeight(weight);
changed = true;
}
if (!changed) {
System.err.println("You must specify at least one attribute to " +
"change in the cache pool.");
return 1;
}
DistributedFileSystem dfs = getDFS();
try {
dfs.modifyCachePool(info);
} catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.print("Successfully modified cache pool " + name);
String prefix = " to have ";
if (owner != null) {
System.out.print(prefix + "owner name " + owner);
prefix = "and ";
}
if (group != null) {
System.out.print(prefix + "group name " + group);
prefix = "and ";
}
if (mode != null) {
System.out.print(prefix + "mode " + new FsPermission(mode.shortValue()));
prefix = "and ";
}
if (weight != null) {
System.out.print(prefix + "weight " + weight);
prefix = "and ";
}
System.out.print("\n");
return 0;
}
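
Only the attributes actually supplied end up in the CachePoolInfo sent to the NameNode, so a single-flag change is enough. Reusing the conf and ToolRunner setup from the sketch above:

// Only -weight is supplied, so owner, group and mode are left untouched.
ToolRunner.run(conf, new DFSAdmin(),
    new String[] {"-modifyCachePool", "pool1", "-weight", "300"});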
final private static String REMOVE_CACHE_POOL_USAGE =
"-removeCachePool <name>";
public int removeCachePool(String argsArray[], int idx) throws IOException {
List<String> args = new LinkedList<String>();
for (int i = idx; i < argsArray.length; i++) {
args.add(argsArray[i]);
}
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when deleting a " +
"cache pool.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + REMOVE_CACHE_POOL_USAGE);
return 1;
}
DistributedFileSystem dfs = getDFS();
try {
dfs.removeCachePool(name);
} catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Successfully removed cache pool " + name + ".");
return 0;
}
final private static String LIST_CACHE_POOLS_USAGE =
"-listCachePools [-verbose] [name]";
private void listCachePool(CachePoolInfo info) {
System.out.print(String.format("%s\n", info.getPoolName()));
System.out.print(String.format("owner:\t%s\n", info.getOwnerName()));
System.out.print(String.format("group:\t%s\n", info.getGroupName()));
System.out.print(String.format("mode:\t%s\n", info.getMode()));
System.out.print(String.format("weight:\t%d\n", info.getWeight()));
System.out.print("\n");
}
public int listCachePools(String argsArray[], int idx) throws IOException {
List<String> args = new LinkedList<String>();
for (int i = idx; i < argsArray.length; i++) {
args.add(argsArray[i]);
}
String name = StringUtils.popFirstNonOption(args);
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("usage is " + LIST_CACHE_POOLS_USAGE);
return 1;
}
boolean gotResults = false;
DistributedFileSystem dfs = getDFS();
try {
RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
if (name != null) {
while (iter.hasNext()) {
CachePoolInfo info = iter.next();
if (info.getPoolName().equals(name)) {
listCachePool(info);
gotResults = true;
return 0;
}
}
} else {
while (iter.hasNext()) {
listCachePool(iter.next());
gotResults = true;
}
}
} catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
int ret = 0;
if (!gotResults) {
if (name != null) {
System.out.println("No cache pool named " + name + " found.");
ret = 1;
} else {
System.out.println("No cache pools found.");
ret = 1;
}
}
return ret;
}
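
And the remaining two sub-commands, again sketched against the same hypothetical cluster and setup:

// With no name, every pool is printed (name, owner, group, mode, weight);
// with a name, only that pool is printed, and a missing pool yields exit code 1.
ToolRunner.run(conf, new DFSAdmin(), new String[] {"-listCachePools"});
ToolRunner.run(conf, new DFSAdmin(), new String[] {"-listCachePools", "pool1"});
// Clean up the pool created earlier.
ToolRunner.run(conf, new DFSAdmin(), new String[] {"-removeCachePool", "pool1"});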
public int rollEdits() throws IOException {
DistributedFileSystem dfs = getDFS();
long txid = dfs.rollEdits();
@ -582,6 +817,10 @@ public class DFSAdmin extends FsShell {
"\t[-fetchImage <local directory>]\n" + "\t[-fetchImage <local directory>]\n" +
"\t[-allowSnapshot <snapshotDir>]\n" + "\t[-allowSnapshot <snapshotDir>]\n" +
"\t[-disallowSnapshot <snapshotDir>]\n" + "\t[-disallowSnapshot <snapshotDir>]\n" +
"\t[" + ADD_CACHE_POOL_USAGE + "]\n" +
"\t[" + MODIFY_CACHE_POOL_USAGE + "]\n" +
"\t[" + REMOVE_CACHE_POOL_USAGE + "]\n" +
"\t[" + LIST_CACHE_POOLS_USAGE + "]\n" +
"\t[-help [cmd]]\n"; "\t[-help [cmd]]\n";
String report ="-report: \tReports basic filesystem information and statistics.\n"; String report ="-report: \tReports basic filesystem information and statistics.\n";
@ -679,6 +918,42 @@ public class DFSAdmin extends FsShell {
String disallowSnapshot = "-disallowSnapshot <snapshotDir>:\n" + String disallowSnapshot = "-disallowSnapshot <snapshotDir>:\n" +
"\tDo not allow snapshots to be taken on a directory any more.\n"; "\tDo not allow snapshots to be taken on a directory any more.\n";
String addCachePool = ADD_CACHE_POOL_USAGE + ": \n" +
"\tAdd a new cache pool.\n" +
"\t<name> is the name of the new pool. It must not already be used.\n" +
"\t<owner> is the owner of the pool. It defaults to the current\n" +
"\tuser name.\n" +
"\t<group> is the group of the pool. It defaults to the primary\n" +
"\tgroup name of the current user.\n" +
"\t<mode> is the mode of the pool. This is a UNIX-style numeric mode\n" +
"\targument, supplied as an octal number. For example, mode 0755\n" +
"\tgrants the owner all permissions, and grants everyone else\n" +
"\tonly read and list permissions.\n" +
"\tThe mode defaults to " +
String.format("0%03o",
FsPermission.getCachePoolDefault().toShort()) + "\n" +
"\t<weight> is the weight of the pool. This determines what share \n" +
"\tof cluster resources the pool will get. It defaults to " +
CachePool.DEFAULT_WEIGHT + "\n";
String modifyCachePool = MODIFY_CACHE_POOL_USAGE + ": \n" +
"\tAdd a new cache pool with the given name.\n" +
"\t<name> is the name of the pool to modify.\n" +
"\t<owner> is the new owner of the pool.\n" +
"\t<group> is the new group of the pool.\n" +
"\t<mode> is the new mode of the pool.\n" +
"\t<weight> is the new weight of the pool.\n";
String removeCachePool = REMOVE_CACHE_POOL_USAGE + ": \n" +
"\tRemove a cache pool.\n" +
"\t<name> is the name of the pool to remove.\n";
String listCachePools = " -listCachePools [-name <name>] [-verbose]\n" +
"\tList cache pools.\n" +
"\tIf <name> is specified, we will list only the cache pool with\n" +
"\tthat name. If <verbose> is specified, we will list detailed\n" +
"\tinformation about each pool\n";
String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
"\t\tis specified.\n"; "\t\tis specified.\n";
@ -726,6 +1001,14 @@ public class DFSAdmin extends FsShell {
System.out.println(allowSnapshot);
} else if ("disallowSnapshot".equalsIgnoreCase(cmd)) {
System.out.println(disallowSnapshot);
} else if ("addCachePool".equalsIgnoreCase(cmd)) {
System.out.println(addCachePool);
} else if ("modifyCachePool".equalsIgnoreCase(cmd)) {
System.out.println(modifyCachePool);
} else if ("removeCachePool".equalsIgnoreCase(cmd)) {
System.out.println(removeCachePool);
} else if ("listCachePools".equalsIgnoreCase(cmd)) {
System.out.println(listCachePools);
} else if ("help".equals(cmd)) { } else if ("help".equals(cmd)) {
System.out.println(help); System.out.println(help);
} else { } else {
@ -752,6 +1035,13 @@ public class DFSAdmin extends FsShell {
System.out.println(fetchImage);
System.out.println(allowSnapshot);
System.out.println(disallowSnapshot);
System.out.println(addCachePool);
System.out.println(modifyCachePool);
System.out.println(removeCachePool);
System.out.println(listCachePools);
System.out.println(help);
System.out.println();
ToolRunner.printGenericCommandUsage(System.out);
@ -988,6 +1278,18 @@ public class DFSAdmin extends FsShell {
} else if ("-fetchImage".equals(cmd)) { } else if ("-fetchImage".equals(cmd)) {
System.err.println("Usage: java DFSAdmin" System.err.println("Usage: java DFSAdmin"
+ " [-fetchImage <local directory>]"); + " [-fetchImage <local directory>]");
} else if ("-addCachePool".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [" + ADD_CACHE_POOL_USAGE + "]");
} else if ("-modifyCachePool".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [" + MODIFY_CACHE_POOL_USAGE + "]");
} else if ("-removeCachePool".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [" + REMOVE_CACHE_POOL_USAGE + "]");
} else if ("-listCachePools".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [" + LIST_CACHE_POOLS_USAGE + "]");
} else {
System.err.println("Usage: java DFSAdmin");
System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
@ -1013,6 +1315,10 @@ public class DFSAdmin extends FsShell {
System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]"); System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]");
System.err.println(" [-setBalancerBandwidth <bandwidth in bytes per second>]"); System.err.println(" [-setBalancerBandwidth <bandwidth in bytes per second>]");
System.err.println(" [-fetchImage <local directory>]"); System.err.println(" [-fetchImage <local directory>]");
System.err.println(" [" + ADD_CACHE_POOL_USAGE + "]");
System.err.println(" [" + MODIFY_CACHE_POOL_USAGE + "]");
System.err.println(" [" + REMOVE_CACHE_POOL_USAGE + "]");
System.err.println(" [" + LIST_CACHE_POOLS_USAGE + "]");
System.err.println(" [-help [cmd]]"); System.err.println(" [-help [cmd]]");
System.err.println(); System.err.println();
ToolRunner.printGenericCommandUsage(System.err); ToolRunner.printGenericCommandUsage(System.err);
@ -1185,6 +1491,14 @@ public class DFSAdmin extends FsShell {
exitCode = setBalancerBandwidth(argv, i);
} else if ("-fetchImage".equals(cmd)) {
exitCode = fetchImage(argv, i);
} else if ("-addCachePool".equals(cmd)) {
exitCode = addCachePool(argv, i);
} else if ("-modifyCachePool".equals(cmd)) {
exitCode = modifyCachePool(argv, i);
} else if ("-removeCachePool".equals(cmd)) {
exitCode = removeCachePool(argv, i);
} else if ("-listCachePools".equals(cmd)) {
exitCode = listCachePools(argv, i);
} else if ("-help".equals(cmd)) { } else if ("-help".equals(cmd)) {
if (i < argv.length) { if (i < argv.length) {
printHelp(argv[i]); printHelp(argv[i]);

View File

@ -16356,7 +16356,7 @@
</comparators>
</test>
<test> <!--Tested -->
<description>Verifying clrSpaceQuota operation is not permitted in safemode</description>
<test-commands>
<command>-fs NAMENODE -mkdir /test </command>
@ -16374,5 +16374,70 @@
</comparator>
</comparators>
</test>
<test> <!--Tested -->
<description>Testing listing no cache pools</description>
<test-commands>
<dfs-admin-command>-fs NAMENODE -listCachePools</dfs-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>No cache pools found.</expected-output>
</comparator>
</comparators>
</test>
<test> <!--Tested -->
<description>Testing adding a cache pool</description>
<test-commands>
<dfs-admin-command>-fs NAMENODE -addCachePool foo</dfs-admin-command>
</test-commands>
<cleanup-commands>
<dfs-admin-command>-fs NAMENODE -removeCachePool foo</dfs-admin-command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Successfully added cache pool foo.</expected-output>
</comparator>
</comparators>
</test>
<test> <!--Tested -->
<description>Testing deleting a cache pool</description>
<test-commands>
<dfs-admin-command>-fs NAMENODE -addCachePool foo</dfs-admin-command>
<dfs-admin-command>-fs NAMENODE -removeCachePool foo</dfs-admin-command>
</test-commands>
<cleanup-commands>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Successfully removed cache pool foo.</expected-output>
</comparator>
</comparators>
</test>
<test> <!--Tested -->
<description>Testing listing a cache pool</description>
<test-commands>
<dfs-admin-command>-fs NAMENODE -addCachePool foo -owner bob -group bob -mode 0664</dfs-admin-command>
<dfs-admin-command>-fs NAMENODE -listCachePools foo</dfs-admin-command>
</test-commands>
<cleanup-commands>
<dfs-admin-command>-fs NAMENODE -removeCachePool foo</dfs-admin-command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>foo</expected-output>
</comparator>
</comparators>
</test>
</tests>
</configuration>