From 3a9cd79e9ddd5a9499e28633ccccdc9eef22b813 Mon Sep 17 00:00:00 2001
From: Colin McCabe
Date: Mon, 9 Sep 2013 18:53:01 +0000
Subject: [PATCH] HDFS-5120. Add command-line support for manipulating cache pools.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1521240 13f79535-47bb-0310-9956-ffa450edef68
---
 .../org/apache/hadoop/util/StringUtils.java   |  77 +++++
 .../hadoop-hdfs/CHANGES-HDFS-4949.txt         |   4 +
 .../hadoop/hdfs/DistributedFileSystem.java    |  50 ++-
 .../hadoop/hdfs/protocol/ClientProtocol.java  |   2 +-
 .../hdfs/server/namenode/CacheManager.java    |   4 +-
 .../hdfs/server/namenode/CachePool.java       |  11 +
 .../apache/hadoop/hdfs/tools/DFSAdmin.java    | 314 ++++++++++++++++++
 .../src/test/resources/testHDFSConf.xml       |  67 +++-
 8 files changed, 523 insertions(+), 6 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 284a042e83b..32e5572c2e9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -905,4 +905,81 @@ public static String getStackTrace(Thread t) {
     }
     return str.toString();
   }
+
+  /**
+   * From a list of command-line arguments, remove both an option and the
+   * next argument.
+   *
+   * @param name  Name of the option to remove. Example: -foo.
+   * @param args  List of arguments.
+   * @return      null if the option was not found; the value of the
+   *              option otherwise.
+   */
+  public static String popOptionWithArgument(String name, List<String> args) {
+    String val = null;
+    for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
+      String cur = iter.next();
+      if (cur.equals("--")) {
+        // stop parsing arguments when you see --
+        break;
+      } else if (cur.equals(name)) {
+        iter.remove();
+        if (!iter.hasNext()) {
+          throw new RuntimeException("option " + name + " requires 1 " +
+              "argument.");
+        }
+        val = iter.next();
+        iter.remove();
+        break;
+      }
+    }
+    return val;
+  }
+
+  /**
+   * From a list of command-line arguments, remove an option.
+   *
+   * @param name  Name of the option to remove. Example: -foo.
+   * @param args  List of arguments.
+   * @return      true if the option was found and removed; false otherwise.
+   */
+  public static boolean popOption(String name, List<String> args) {
+    for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
+      String cur = iter.next();
+      if (cur.equals("--")) {
+        // stop parsing arguments when you see --
+        break;
+      } else if (cur.equals(name)) {
+        iter.remove();
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * From a list of command-line arguments, return the first non-option
+   * argument. Non-option arguments are those which either come after
+   * a double dash (--) or do not start with a dash.
+   *
+   * @param args  List of arguments.
+   * @return      The first non-option argument, or null if there were none.
+   */
+  public static String popFirstNonOption(List<String> args) {
+    for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
+      String cur = iter.next();
+      if (cur.equals("--")) {
+        if (!iter.hasNext()) {
+          return null;
+        }
+        cur = iter.next();
+        iter.remove();
+        return cur;
+      } else if (!cur.startsWith("-")) {
+        iter.remove();
+        return cur;
+      }
+    }
+    return null;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
index 0a903312a96..69d33a6927f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
@@ -27,6 +27,10 @@ HDFS-4949 (Unreleased)
     HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin
     Patrick McCabe)
 
+    HDFS-5120. Add command-line support for manipulating cache pools.
+    (Contributed by Colin Patrick McCabe)
+
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 6cb84741ab1..c779f889a2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -57,6 +57,7 @@
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -1578,5 +1579,52 @@ public Boolean next(final FileSystem fs, final Path p)
       }
     }.resolve(this, absF);
   }
-
+
+  /**
+   * Add a cache pool.
+   *
+   * @param info
+   *          Information about the cache pool to add.
+   * @throws IOException
+   *          If the request could not be completed.
+   */
+  public void addCachePool(CachePoolInfo info) throws IOException {
+    dfs.namenode.addCachePool(info);
+  }
+
+  /**
+   * Modify an existing cache pool.
+   *
+   * @param info
+   *          Information about the cache pool to modify.
+   * @throws IOException
+   *          If the request could not be completed.
+   */
+  public void modifyCachePool(CachePoolInfo info) throws IOException {
+    dfs.namenode.modifyCachePool(info);
+  }
+
+  /**
+   * Remove a cache pool.
+   *
+   * @param name
+   *          Name of the cache pool to remove.
+   * @throws IOException
+   *          If the cache pool did not exist, or could not be removed.
+   */
+  public void removeCachePool(String name) throws IOException {
+    dfs.namenode.removeCachePool(name);
+  }
+
+  /**
+   * List all cache pools.
+   *
+   * @return A remote iterator from which you can get CachePoolInfo objects.
+   *         Requests will be made as needed.
+   * @throws IOException
+   *          If there was an error listing cache pools.
+   */
+  public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
+    return dfs.namenode.listCachePools("");
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 45b041ffb4a..cc31c397c1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1144,7 +1144,7 @@ public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
   public void addCachePool(CachePoolInfo info) throws IOException;
 
   /**
-   * Modify a cache pool.
+   * Modify an existing cache pool.
    *
    * @param req
    *          The request to modify a cache pool.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 53499c86a69..b71e4d0d369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -254,9 +254,7 @@ public synchronized List<Fallible<Long>> removeEntries(List<Long> entryIds,
   public synchronized void addCachePool(CachePoolInfo info)
       throws IOException {
     String poolName = info.getPoolName();
-    if (poolName.isEmpty()) {
-      throw new IOException("invalid empty cache pool name");
-    }
+    CachePool.validateName(poolName);
     CachePool pool = cachePools.get(poolName);
     if (pool != null) {
       throw new IOException("cache pool " + poolName + " already exists.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
index 14a786bb195..36ebd402e41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
@@ -40,6 +40,8 @@ public final class CachePool {
 
   public static final Log LOG = LogFactory.getLog(CachePool.class);
 
+  public static final int DEFAULT_WEIGHT = 100;
+
   @Nonnull
   private final String poolName;
 
@@ -152,4 +154,13 @@ public String toString() {
         append(", weight:").append(weight).
         append(" }").toString();
   }
+
+  public static void validateName(String name) throws IOException {
+    if (name.isEmpty()) {
+      // Empty pool names are not allowed because they would be highly
+      // confusing.
+      // They would also break the ability to list all pools
+      // by starting with prevKey = ""
+      throw new IOException("invalid empty cache pool name");
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 98691df6a57..912569a9c1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -24,6 +24,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.TreeSet;
 
@@ -36,6 +37,8 @@
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -44,12 +47,14 @@
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
@@ -62,6 +67,8 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
+import com.google.common.base.Joiner;
+
 /**
  * This class provides some DFS administrative access shell commands.
 */
@@ -455,6 +462,234 @@ public int saveNamespace() throws IOException {
     return exitCode;
   }
 
+  final private static String ADD_CACHE_POOL_USAGE =
+      "-addCachePool <name> [-owner <owner>] " +
+      "[-group <group>] [-mode <mode>] [-weight <weight>]";
+
+  public int addCachePool(String argsArray[], int idx) throws IOException {
+    List<String> args = new LinkedList<String>();
+    for (int i = idx; i < argsArray.length; i++) {
+      args.add(argsArray[i]);
+    }
+    String owner = StringUtils.popOptionWithArgument("-owner", args);
+    if (owner == null) {
+      owner = UserGroupInformation.getCurrentUser().getShortUserName();
+    }
+    String group = StringUtils.popOptionWithArgument("-group", args);
+    if (group == null) {
+      group = UserGroupInformation.getCurrentUser().getGroupNames()[0];
+    }
+    String modeString = StringUtils.popOptionWithArgument("-mode", args);
+    int mode;
+    if (modeString == null) {
+      mode = FsPermission.getCachePoolDefault().toShort();
+    } else {
+      mode = Integer.parseInt(modeString, 8);
+    }
+    String weightString = StringUtils.popOptionWithArgument("-weight", args);
+    int weight;
+    if (weightString == null) {
+      weight = CachePool.DEFAULT_WEIGHT;
+    } else {
+      weight = Integer.parseInt(weightString);
+    }
+    String name = StringUtils.popFirstNonOption(args);
+    if (name == null) {
+      System.err.println("You must specify a name when creating a " +
+          "cache pool.");
+      return 1;
+    }
+    if (!args.isEmpty()) {
+      System.err.print("Can't understand arguments: " +
+          Joiner.on(" ").join(args) + "\n");
+      System.err.println("Usage is " + ADD_CACHE_POOL_USAGE);
+      return 1;
+    }
+    DistributedFileSystem dfs = getDFS();
+    CachePoolInfo info = new CachePoolInfo(name).
+        setOwnerName(owner).
+        setGroupName(group).
+        setMode(new FsPermission((short)mode)).
+        setWeight(weight);
+    try {
+      dfs.addCachePool(info);
+    } catch (IOException e) {
+      throw new RemoteException(e.getClass().getName(), e.getMessage());
+    }
+    System.out.println("Successfully added cache pool " + name + ".");
+    return 0;
+  }
+
+  final private static String MODIFY_CACHE_POOL_USAGE =
+      "-modifyCachePool <name> [-owner <owner>] " +
+      "[-group <group>] [-mode <mode>] [-weight <weight>]";
+
+  public int modifyCachePool(String argsArray[], int idx) throws IOException {
+    List<String> args = new LinkedList<String>();
+    for (int i = idx; i < argsArray.length; i++) {
+      args.add(argsArray[i]);
+    }
+    String owner = StringUtils.popOptionWithArgument("-owner", args);
+    String group = StringUtils.popOptionWithArgument("-group", args);
+    String modeString = StringUtils.popOptionWithArgument("-mode", args);
+    Integer mode = (modeString == null) ?
+        null : Integer.parseInt(modeString, 8);
+    String weightString = StringUtils.popOptionWithArgument("-weight", args);
+    Integer weight = (weightString == null) ?
+        null : Integer.parseInt(weightString);
+    String name = StringUtils.popFirstNonOption(args);
+    if (name == null) {
+      System.err.println("You must specify a name when modifying a " +
+          "cache pool.");
+      return 1;
+    }
+    if (!args.isEmpty()) {
+      System.err.print("Can't understand arguments: " +
+          Joiner.on(" ").join(args) + "\n");
+      System.err.println("Usage is " + MODIFY_CACHE_POOL_USAGE);
+      return 1;
+    }
+    boolean changed = false;
+    CachePoolInfo info = new CachePoolInfo(name);
+    if (owner != null) {
+      info.setOwnerName(owner);
+      changed = true;
+    }
+    if (group != null) {
+      info.setGroupName(group);
+      changed = true;
+    }
+    if (mode != null) {
+      info.setMode(new FsPermission(mode.shortValue()));
+      changed = true;
+    }
+    if (weight != null) {
+      info.setWeight(weight);
+      changed = true;
+    }
+    if (!changed) {
+      System.err.println("You must specify at least one attribute to " +
+          "change in the cache pool.");
+      return 1;
+    }
+    DistributedFileSystem dfs = getDFS();
+    try {
+      dfs.modifyCachePool(info);
+    } catch (IOException e) {
+      throw new RemoteException(e.getClass().getName(), e.getMessage());
+    }
+    System.out.print("Successfully modified cache pool " + name);
+    String prefix = " to have ";
+    if (owner != null) {
+      System.out.print(prefix + "owner name " + owner);
+      prefix = " and ";
+    }
+    if (group != null) {
+      System.out.print(prefix + "group name " + group);
+      prefix = " and ";
+    }
+    if (mode != null) {
+      System.out.print(prefix + "mode " + new FsPermission(mode.shortValue()));
+      prefix = " and ";
+    }
+    if (weight != null) {
+      System.out.print(prefix + "weight " + weight);
+      prefix = " and ";
+    }
+    System.out.print("\n");
+    return 0;
+  }
+
+  final private static String REMOVE_CACHE_POOL_USAGE =
+      "-removeCachePool <name>";
+
+  public int removeCachePool(String argsArray[], int idx) throws IOException {
+    List<String> args = new LinkedList<String>();
+    for (int i = idx; i < argsArray.length; i++) {
+      args.add(argsArray[i]);
+    }
+    String name = StringUtils.popFirstNonOption(args);
+    if (name == null) {
+      System.err.println("You must specify a name when deleting a " +
+          "cache pool.");
+      return 1;
+    }
+    if (!args.isEmpty()) {
+      System.err.print("Can't understand arguments: " +
+          Joiner.on(" ").join(args) + "\n");
+      System.err.println("Usage is " + REMOVE_CACHE_POOL_USAGE);
+      return 1;
+    }
+    DistributedFileSystem dfs = getDFS();
+    try {
+      dfs.removeCachePool(name);
+    } catch (IOException e) {
+      throw new RemoteException(e.getClass().getName(), e.getMessage());
+    }
+    System.out.println("Successfully removed cache pool " + name + ".");
+    return 0;
+  }
+
+  final private static String LIST_CACHE_POOLS_USAGE =
+      "-listCachePools [-verbose] [name]";
+
+  private void listCachePool(CachePoolInfo info) {
+    System.out.print(String.format("%s\n", info.getPoolName()));
+    System.out.print(String.format("owner:\t%s\n", info.getOwnerName()));
+    System.out.print(String.format("group:\t%s\n", info.getGroupName()));
+    System.out.print(String.format("mode:\t%s\n", info.getMode()));
+    System.out.print(String.format("weight:\t%d\n", info.getWeight()));
+    System.out.print("\n");
+  }
+
+  public int listCachePools(String argsArray[], int idx) throws IOException {
+    List<String> args = new LinkedList<String>();
+    for (int i = idx; i < argsArray.length; i++) {
+      args.add(argsArray[i]);
+    }
+    String name = StringUtils.popFirstNonOption(args);
+    if (!args.isEmpty()) {
+      System.err.print("Can't understand arguments: " +
+          Joiner.on(" ").join(args) + "\n");
+      System.err.println("Usage is " + LIST_CACHE_POOLS_USAGE);
+      return 1;
+    }
+    boolean gotResults = false;
+    DistributedFileSystem dfs = getDFS();
+    try {
+      RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+      if (name != null) {
+        while (iter.hasNext()) {
+          CachePoolInfo info = iter.next();
+          if (info.getPoolName().equals(name)) {
+            listCachePool(info);
+            gotResults = true;
+            return 0;
+          }
+        }
+      } else {
+        while (iter.hasNext()) {
+          listCachePool(iter.next());
+          gotResults = true;
+        }
+      }
+    } catch (IOException e) {
+      throw new RemoteException(e.getClass().getName(), e.getMessage());
+    }
+    int ret = 0;
+    if (!gotResults) {
+      if (name != null) {
+        System.out.println("No cache pool named " + name + " found.");
+        ret = 1;
+      } else {
+        System.out.println("No cache pools found.");
+        ret = 1;
+      }
+    }
+    return ret;
+  }
+
   public int rollEdits() throws IOException {
     DistributedFileSystem dfs = getDFS();
     long txid = dfs.rollEdits();
@@ -582,6 +817,10 @@ private void printHelp(String cmd) {
       "\t[-fetchImage <local directory>]\n" +
       "\t[-allowSnapshot <snapshotDir>]\n" +
       "\t[-disallowSnapshot <snapshotDir>]\n" +
+      "\t[" + ADD_CACHE_POOL_USAGE + "]\n" +
+      "\t[" + MODIFY_CACHE_POOL_USAGE + "]\n" +
+      "\t[" + REMOVE_CACHE_POOL_USAGE + "]\n" +
+      "\t[" + LIST_CACHE_POOLS_USAGE + "]\n" +
       "\t[-help [cmd]]\n";
 
     String report ="-report: \tReports basic filesystem information and statistics.\n";
@@ -679,6 +918,42 @@ private void printHelp(String cmd) {
     String disallowSnapshot = "-disallowSnapshot <snapshotDir>:\n" +
         "\tDo not allow snapshots to be taken on a directory any more.\n";
 
+    String addCachePool = ADD_CACHE_POOL_USAGE + ": \n" +
+      "\tAdd a new cache pool.\n" +
+      "\t<name> is the name of the new pool. It must not already be used.\n" +
+      "\t<owner> is the owner of the pool. It defaults to the current\n" +
+      "\tuser name.\n" +
+      "\t<group> is the group of the pool. It defaults to the primary\n" +
+      "\tgroup name of the current user.\n" +
+      "\t<mode> is the mode of the pool. This is a UNIX-style numeric mode\n" +
+      "\targument, supplied as an octal number. For example, mode 0755\n" +
+      "\tgrants the owner all permissions, and grants everyone else\n" +
+      "\tonly read and list permissions.\n" +
+      "\tThe mode defaults to " +
+      String.format("0%03o",
+          FsPermission.getCachePoolDefault().toShort()) + "\n" +
+      "\t<weight> is the weight of the pool. This determines what share\n" +
+      "\tof cluster resources the pool will get. It defaults to " +
+      CachePool.DEFAULT_WEIGHT + "\n";
+
+    String modifyCachePool = MODIFY_CACHE_POOL_USAGE + ": \n" +
+      "\tModify the metadata of an existing cache pool.\n" +
+      "\t<name> is the name of the pool to modify.\n" +
+      "\t<owner> is the new owner of the pool.\n" +
+      "\t<group> is the new group of the pool.\n" +
+      "\t<mode> is the new mode of the pool.\n" +
+      "\t<weight> is the new weight of the pool.\n";
+
+    String removeCachePool = REMOVE_CACHE_POOL_USAGE + ": \n" +
+      "\tRemove a cache pool.\n" +
+      "\t<name> is the name of the pool to remove.\n";
+
+    String listCachePools = " -listCachePools [-verbose] [name]\n" +
+      "\tList cache pools.\n" +
+      "\tIf <name> is specified, we will list only the cache pool with\n" +
+      "\tthat name.\n" +
+      "\tIf -verbose is specified, we will list detailed\n" +
+      "\tinformation about each pool.\n";
+
     String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
       "\t\tis specified.\n";
 
@@ -726,6 +1001,14 @@ private void printHelp(String cmd) {
       System.out.println(allowSnapshot);
     } else if ("disallowSnapshot".equalsIgnoreCase(cmd)) {
       System.out.println(disallowSnapshot);
+    } else if ("addCachePool".equalsIgnoreCase(cmd)) {
+      System.out.println(addCachePool);
+    } else if ("modifyCachePool".equalsIgnoreCase(cmd)) {
+      System.out.println(modifyCachePool);
+    } else if ("removeCachePool".equalsIgnoreCase(cmd)) {
+      System.out.println(removeCachePool);
+    } else if ("listCachePools".equalsIgnoreCase(cmd)) {
+      System.out.println(listCachePools);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -752,6 +1035,13 @@ private void printHelp(String cmd) {
       System.out.println(fetchImage);
       System.out.println(allowSnapshot);
       System.out.println(disallowSnapshot);
+      System.out.println(addCachePool);
+      System.out.println(modifyCachePool);
+      System.out.println(removeCachePool);
+      System.out.println(listCachePools);
+
       System.out.println(help);
       System.out.println();
       ToolRunner.printGenericCommandUsage(System.out);
@@ -988,6 +1278,18 @@ private static void printUsage(String cmd) {
     } else if ("-fetchImage".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
          + " [-fetchImage <local directory>]");
+    } else if ("-addCachePool".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+         + " [" + ADD_CACHE_POOL_USAGE + "]");
+    } else if ("-modifyCachePool".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+         + " [" + MODIFY_CACHE_POOL_USAGE + "]");
+    } else if ("-removeCachePool".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+         + " [" + REMOVE_CACHE_POOL_USAGE + "]");
+    } else if ("-listCachePools".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+         + " [" + LIST_CACHE_POOLS_USAGE + "]");
     } else {
       System.err.println("Usage: java DFSAdmin");
       System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
@@ -1013,6 +1315,10 @@ private static void printUsage(String cmd) {
       System.err.println("           ["+ClearSpaceQuotaCommand.USAGE+"]");
       System.err.println("           [-setBalancerBandwidth <bandwidth in bytes per second>]");
       System.err.println("           [-fetchImage <local directory>]");
+      System.err.println("           [" + ADD_CACHE_POOL_USAGE + "]");
+      System.err.println("           [" + MODIFY_CACHE_POOL_USAGE + "]");
+      System.err.println("           [" + REMOVE_CACHE_POOL_USAGE + "]");
+      System.err.println("           [" + LIST_CACHE_POOLS_USAGE + "]");
       System.err.println("           [-help [cmd]]");
       System.err.println();
       ToolRunner.printGenericCommandUsage(System.err);
@@ -1185,6 +1491,14 @@ public int run(String[] argv) throws Exception {
       exitCode = setBalancerBandwidth(argv, i);
     } else if ("-fetchImage".equals(cmd)) {
       exitCode = fetchImage(argv, i);
+    } else if ("-addCachePool".equals(cmd)) {
+      exitCode = addCachePool(argv, i);
+    } else if ("-modifyCachePool".equals(cmd)) {
+      exitCode = modifyCachePool(argv, i);
+    } else if ("-removeCachePool".equals(cmd)) {
+      exitCode = removeCachePool(argv, i);
+    } else if ("-listCachePools".equals(cmd)) {
+      exitCode = listCachePools(argv, i);
     } else if ("-help".equals(cmd)) {
       if (i < argv.length) {
         printHelp(argv[i]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 44d2b32f33c..bd248bc88b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -16356,7 +16356,7 @@
-
+
        <description>Verifying clrSpaceQuota operation is not permitted in safemode</description>
        <test-commands>
          <command>-fs NAMENODE -mkdir /test</command>
@@ -16374,5 +16374,70 @@
     </test>
+
+    <test> <!-- TESTED -->
+      <description>Testing listing no cache pools</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -listCachePools</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>No cache pools found.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>Testing adding a cache pool</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -addCachePool foo</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -removeCachePool foo</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Successfully added cache pool foo.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>Testing deleting a cache pool</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -addCachePool foo</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -removeCachePool foo</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Successfully removed cache pool foo.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>Testing listing a cache pool</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -addCachePool foo -owner bob -group bob -mode 0664</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -listCachePools foo</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -removeCachePool foo</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>foo</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
   </tests>
 </configuration>
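
For readers of this patch, here is a minimal sketch of how the new DistributedFileSystem cache pool API introduced above might be driven programmatically, mirroring what the new DFSAdmin subcommands do. The NameNode URI, user and group names, and the pool name "pool1" are illustrative assumptions, not taken from the patch, and the snippet assumes a build of the HDFS-4949 branch where these methods exist.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative URI; point this at your own NameNode.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Roughly equivalent to:
    //   dfsadmin -addCachePool pool1 -owner bob -group eng -mode 0755 -weight 100
    CachePoolInfo info = new CachePoolInfo("pool1").
        setOwnerName("bob").
        setGroupName("eng").
        setMode(new FsPermission((short)0755)).
        setWeight(100);
    dfs.addCachePool(info);

    // Roughly equivalent to: dfsadmin -listCachePools
    RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
    while (iter.hasNext()) {
      CachePoolInfo pool = iter.next();
      System.out.println(pool.getPoolName() + "\towner: " + pool.getOwnerName() +
          "\tgroup: " + pool.getGroupName() + "\tmode: " + pool.getMode() +
          "\tweight: " + pool.getWeight());
    }

    // Roughly equivalent to: dfsadmin -removeCachePool pool1
    dfs.removeCachePool("pool1");
  }
}

The corresponding command-line forms documented in the new DFSAdmin help text are -addCachePool, -modifyCachePool, -removeCachePool, and -listCachePools, as exercised by the testHDFSConf.xml cases above.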