diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 64a4fdf568e..ae2d2662586 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -114,9 +114,6 @@ Trunk (unreleased changes)
     HDFS-3789. JournalManager#format() should be able to throw IOException
     (Ivan Kelly via todd)
 
-    HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
-    suresh)
-
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index d263acd5906..8d59ae65886 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -18,21 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -46,17 +33,10 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +57,8 @@
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -443,6 +424,7 @@ private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
    * 
    * @param conf configuration
    * @return list of InetSocketAddresses
+   * @throws IOException if no addresses are configured
    */
   public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
       Configuration conf) {
@@ -1091,44 +1073,4 @@ public static String getOnlyNameServiceIdOrNull(Configuration conf) {
       return null;
     }
   }
-
-  public static Options helpOptions = new Options();
-  public static Option helpOpt = new Option("h", "help", false,
-      "get help information");
-
-  static {
-    helpOptions.addOption(helpOpt);
-  }
-
-  /**
-   * Parse the arguments for commands
-   * 
-   * @param args the argument to be parsed
-   * @param helpDescription help information to be printed out
-   * @param out Printer
-   * @param printGenericCommandUsage whether to print the
-   *            generic command usage defined in ToolRunner
-   * @return true when the argument matches help option, false if not
-   */
-  public static boolean parseHelpArgument(String[] args,
-      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
-    if (args.length == 1) {
-      try {
-        CommandLineParser parser = new PosixParser();
-        CommandLine cmdLine = parser.parse(helpOptions, args);
-        if (cmdLine.hasOption(helpOpt.getOpt())
-            || cmdLine.hasOption(helpOpt.getLongOpt())) {
-          // should print out the help information
-          out.println(helpDescription + "\n");
-          if (printGenericCommandUsage) {
-            ToolRunner.printGenericCommandUsage(out);
-          }
-          return true;
-        }
-      } catch (ParseException pe) {
-        return false;
-      }
-    }
-    return false;
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 577d73b76be..f949c924a1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -27,7 +26,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.PrintStream;
 import java.net.Socket;
 import java.net.URI;
 import java.text.DateFormat;
@@ -70,6 +68,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;
@@ -80,6 +79,7 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import static com.google.common.base.Preconditions.checkArgument;
 
 /** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
  * when some datanodes become full or when new empty nodes join the cluster.
@@ -189,13 +189,6 @@ public class Balancer {
    */
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
 
-  private static final String USAGE = "Usage: java "
-      + Balancer.class.getSimpleName()
-      + "\n\t[-policy <policy>]\tthe balancing policy: "
-      + BalancingPolicy.Node.INSTANCE.getName() + " or "
-      + BalancingPolicy.Pool.INSTANCE.getName()
-      + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
-
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
   private final double threshold;
@@ -1557,7 +1550,7 @@ static Parameters parse(String[] args) {
           }
         }
       } catch(RuntimeException e) {
-        printUsage(System.err);
+        printUsage();
         throw e;
       }
     }
@@ -1565,8 +1558,13 @@ static Parameters parse(String[] args) {
       return new Parameters(policy, threshold);
     }
 
-    private static void printUsage(PrintStream out) {
-      out.println(USAGE + "\n");
+    private static void printUsage() {
+      System.out.println("Usage: java " + Balancer.class.getSimpleName());
+      System.out.println("    [-policy <policy>]\tthe balancing policy: "
+          + BalancingPolicy.Node.INSTANCE.getName() + " or "
+          + BalancingPolicy.Pool.INSTANCE.getName());
+      System.out.println(
+          "    [-threshold <threshold>]\tPercentage of disk capacity");
     }
   }
 
@@ -1575,10 +1573,6 @@ private static void printUsage(PrintStream out) {
    * @param args Command line arguments
    */
   public static void main(String[] args) {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     try {
       System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 83219cca48b..a456d133d4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -46,7 +46,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -56,7 +55,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -100,8 +98,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -126,6 +124,9 @@
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -170,9 +171,9 @@ import org.apache.hadoop.util.VersionInfo;
 
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 /**********************************************************
@@ -229,8 +230,6 @@ public class DataNode extends Configured
 
   static final Log ClientTraceLog =
     LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
-
-  private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
 
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
@@ -1542,7 +1541,7 @@ public static DataNode instantiateDataNode(String args [], Configuration conf,
     }
 
     if (!parseArguments(args, conf)) {
-      printUsage(System.err);
+      printUsage();
       return null;
     }
     Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1656,8 +1655,9 @@ public String toString() {
         + xmitsInProgress.get() + "}";
   }
 
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
+  private static void printUsage() {
+    System.err.println("Usage: java DataNode");
+    System.err.println("           [-rollback]");
   }
 
   /**
@@ -1742,10 +1742,6 @@ public static void secureMain(String args[], SecureResources resources) {
   }
 
   public static void main(String args[]) {
-    if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     secureMain(args, null);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 2df693b3c4e..20e7aafba9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
@@ -39,8 +38,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -72,9 +69,12 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -188,22 +188,6 @@ public static enum OperationCategory {
     DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
 
-  private static final String USAGE = "Usage: java NameNode ["
-      + StartupOption.BACKUP.getName() + "] | ["
-      + StartupOption.CHECKPOINT.getName() + "] | ["
-      + StartupOption.FORMAT.getName() + " ["
-      + StartupOption.CLUSTERID.getName() + " cid ] ["
-      + StartupOption.FORCE.getName() + "] ["
-      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
-      + StartupOption.UPGRADE.getName() + "] | ["
-      + StartupOption.ROLLBACK.getName() + "] | ["
-      + StartupOption.FINALIZE.getName() + "] | ["
-      + StartupOption.IMPORT.getName() + "] | ["
-      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
-      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
-      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
-      + " ] ]";
-
   public long getProtocolVersion(String protocol, 
                                  long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
@@ -911,8 +895,25 @@ private static boolean finalize(Configuration conf,
     return false;
   }
 
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
+  private static void printUsage() {
+    System.err.println(
+      "Usage: java NameNode ["
+          + StartupOption.BACKUP.getName() + "] | ["
+          + StartupOption.CHECKPOINT.getName() + "] | ["
+          + StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName()
+          + " cid ] [" + StartupOption.FORCE.getName() + "] ["
+          + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+          + StartupOption.UPGRADE.getName() + "] | ["
+          + StartupOption.ROLLBACK.getName() + "] | ["
+          + StartupOption.FINALIZE.getName() + "] | ["
+          + StartupOption.IMPORT.getName() + "] | ["
+          + StartupOption.INITIALIZESHAREDEDITS.getName()
+          + " [" + StartupOption.FORCE.getName() + "] ["
+          + StartupOption.NONINTERACTIVE.getName() + "]"
+          + "] | ["
+          + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
+          + StartupOption.RECOVER.getName() + " [ "
+          + StartupOption.FORCE.getName() + " ] ]");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -1060,7 +1061,7 @@ public static NameNode createNameNode(String argv[], Configuration conf)
       conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
-      printUsage(System.err);
+      printUsage();
       return null;
     }
     setStartupOption(conf, startOpt);
@@ -1174,10 +1175,6 @@ protected String getNameServiceId(Configuration conf) {
   /**
    */
   public static void main(String argv[]) throws Exception {
-    if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 47d09ef993c..8057955dfac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -562,9 +562,6 @@ public static void main(String[] argv) throws Exception {
     if (opts == null) {
       LOG.fatal("Failed to parse options");
       terminate(1);
-    } else if (opts.shouldPrintHelp()) {
-      opts.usage();
-      System.exit(0);
     }
 
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -598,7 +595,6 @@ static class CommandLineOpts {
     private final Option geteditsizeOpt;
     private final Option checkpointOpt;
     private final Option formatOpt;
-    private final Option helpOpt;
 
     Command cmd;
 
@@ -609,7 +605,6 @@ enum Command {
 
     private boolean shouldForce;
     private boolean shouldFormat;
-    private boolean shouldPrintHelp;
 
     CommandLineOpts() {
       geteditsizeOpt = new Option("geteditsize",
@@ -617,32 +612,20 @@ enum Command {
       checkpointOpt = OptionBuilder.withArgName("force")
       .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
       formatOpt = new Option("format", "format the local storage during startup");
-      helpOpt = new Option("h", "help", false, "get help information");
 
       options.addOption(geteditsizeOpt);
       options.addOption(checkpointOpt);
       options.addOption(formatOpt);
-      options.addOption(helpOpt);
     }
 
     public boolean shouldFormat() {
       return shouldFormat;
     }
 
-    public boolean shouldPrintHelp() {
-      return shouldPrintHelp;
-    }
-
     public void parse(String ... argv) throws ParseException {
       CommandLineParser parser = new PosixParser();
       CommandLine cmdLine = parser.parse(options, argv);
 
-      if (cmdLine.hasOption(helpOpt.getOpt())
-          || cmdLine.hasOption(helpOpt.getLongOpt())) {
-        shouldPrintHelp = true;
-        return;
-      }
-
       boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
       boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
       if (hasGetEdit && hasCheckpoint) {
@@ -679,13 +662,8 @@ public boolean shouldForceCheckpoint() {
     }
 
     void usage() {
-      String header = "The Secondary NameNode is a helper "
-          + "to the primary NameNode. The Secondary is responsible "
-          + "for supporting periodic checkpoints of the HDFS metadata. "
-          + "The current design allows only one Secondary NameNode "
-          + "per HDFS cluster.";
       HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp("secondarynamenode", header, options, "", false);
+      formatter.printHelp("secondarynamenode", options);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 47c852d5963..d4397276ea1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -42,10 +42,6 @@ public class DFSHAAdmin extends HAAdmin {
   protected void setErrOut(PrintStream errOut) {
     this.errOut = errOut;
   }
-
-  protected void setOut(PrintStream out) {
-    this.out = out;
-  }
 
   @Override
   public void setConf(Configuration conf) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index e18c9a86ad0..b1163d6885b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -162,10 +162,6 @@ protected String getScopeInsideParentNode() {
 
   public static void main(String args[])
       throws Exception {
-    if (DFSUtil.parseHelpArgument(args, 
-        ZKFailoverController.USAGE, System.out, true)) {
-      System.exit(0);
-    }
 
     GenericOptionsParser parser = new GenericOptionsParser(
         new HdfsConfiguration(), args);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index c3238f0de30..566d77a5fbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -73,25 +73,6 @@ public class DFSck extends Configured implements Tool {
     HdfsConfiguration.init();
   }
 
-  private static final String USAGE = "Usage: DFSck <path> "
-      + "[-list-corruptfileblocks | "
-      + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]]\n"
-      + "\t<path>\tstart checking from this path\n"
-      + "\t-move\tmove corrupted files to /lost+found\n"
-      + "\t-delete\tdelete corrupted files\n"
-      + "\t-files\tprint out files being checked\n"
-      + "\t-openforwrite\tprint out files opened for write\n"
-      + "\t-list-corruptfileblocks\tprint out list of missing "
-      + "blocks and files they belong to\n"
-      + "\t-blocks\tprint out block report\n"
-      + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n"
-      + "\t\tBy default fsck ignores files opened for write, "
-      + "use -openforwrite to report such files. They are usually "
-      + " tagged CORRUPT or HEALTHY depending on their block "
-      + "allocation status";
-
   private final UserGroupInformation ugi;
   private final PrintStream out;
 
@@ -112,9 +93,25 @@ public DFSck(Configuration conf, PrintStream out) throws IOException {
   /**
    * Print fsck usage information
    */
-  static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-    ToolRunner.printGenericCommandUsage(out);
+  static void printUsage() {
+    System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | "
+        + "[-move | -delete | -openforwrite] "
+        + "[-files [-blocks [-locations | -racks]]]]");
+    System.err.println("\t<path>\tstart checking from this path");
+    System.err.println("\t-move\tmove corrupted files to /lost+found");
+    System.err.println("\t-delete\tdelete corrupted files");
+    System.err.println("\t-files\tprint out files being checked");
+    System.err.println("\t-openforwrite\tprint out files opened for write");
+    System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
+        + "blocks and files they belong to");
+    System.err.println("\t-blocks\tprint out block report");
+    System.err.println("\t-locations\tprint out locations for every block");
+    System.err.println("\t-racks\tprint out network topology for data-node locations");
+    System.err.println("\t\tBy default fsck ignores files opened for write, "
+        + "use -openforwrite to report such files. They are usually "
+        + " tagged CORRUPT or HEALTHY depending on their block "
+        + "allocation status");
+    ToolRunner.printGenericCommandUsage(System.err);
   }
   /**
    * @param args
@@ -122,7 +119,7 @@ static void printUsage(PrintStream out) {
    */
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
-      printUsage(System.err);
+      printUsage();
       return -1;
     }
@@ -261,12 +258,12 @@ else if (args[idx].equals("-list-corruptfileblocks")) {
         } else {
           System.err.println("fsck: can only operate on one path at a time '"
               + args[idx] + "'");
-          printUsage(System.err);
+          printUsage();
           return -1;
         }
       } else {
         System.err.println("fsck: Illegal option '" + args[idx] + "'");
-        printUsage(System.err);
+        printUsage();
         return -1;
       }
     }
@@ -307,14 +304,10 @@ public static void main(String[] args) throws Exception {
     // -files option is also used by GenericOptionsParser
     // Make sure that is not the first argument for fsck
     int res = -1;
-    if ((args.length == 0) || ("-files".equals(args[0]))) {
-      printUsage(System.err);
-      ToolRunner.printGenericCommandUsage(System.err);
-    } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      res = 0;
-    } else {
+    if ((args.length == 0 ) || ("-files".equals(args[0])))
+      printUsage();
+    else
       res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
-    }
     System.exit(res);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index f74b4e88962..e0935d475cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -47,7 +48,9 @@
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -68,10 +71,8 @@ public class DelegationTokenFetcher {
   private static final String CANCEL = "cancel";
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
-  private static final String HELP = "help";
-  private static final String HELP_SHORT = "h";
 
-  private static void printUsage(PrintStream err) {
+  private static void printUsage(PrintStream err) throws IOException {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
     err.println("fetchdt <opts> <token file>");
@@ -106,7 +107,6 @@ public static void main(final String[] args) throws Exception {
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
     fetcherOptions.addOption(RENEW, false, "renew the token");
     fetcherOptions.addOption(PRINT, false, "print the token");
-    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
     GenericOptionsParser parser = new GenericOptionsParser(conf,
         fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
@@ -119,14 +119,9 @@ public static void main(final String[] args) throws Exception {
       final boolean cancel = cmd.hasOption(CANCEL);
       final boolean renew = cmd.hasOption(RENEW);
       final boolean print = cmd.hasOption(PRINT);
-      final boolean help = cmd.hasOption(HELP);
       String[] remaining = parser.getRemainingArgs();
 
       // check option validity
-      if (help) {
-        printUsage(System.out);
-        System.exit(0);
-      }
       if (cancel && renew || cancel && print || renew && print
           || cancel && renew && print) {
         System.err.println("ERROR: Only specify cancel, renew or print.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index 778ac59ee25..adf3293edf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -324,10 +324,6 @@ public Integer run() throws Exception {
   }
 
   public static void main(String[] args) throws Exception {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
     System.exit(res);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
index 49d56d534fb..c0e415a8433 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
@@ -28,7 +28,6 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -44,8 +43,6 @@ public class GetGroups extends GetGroupsBase {
 
   private static final Log LOG = LogFactory.getLog(GetGroups.class);
-
-  static final String USAGE = "Usage: hdfs groups [username ...]";
 
   static{
     HdfsConfiguration.init();
   }
@@ -89,10 +86,6 @@ protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
   }
 
   public static void main(String[] argv) throws Exception {
-    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
     System.exit(res);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 666e52b484f..61e8ebef5c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -55,9 +55,7 @@ public class TestDFSHAAdmin {
 
   private DFSHAAdmin tool;
   private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
-  private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
   private String errOutput;
-  private String output;
   private HAServiceProtocol mockProtocol;
   private ZKFCProtocol mockZkfcProtocol;
 
@@ -113,14 +111,12 @@ protected HAServiceTarget resolveTarget(String nnId) {
     };
     tool.setConf(getHAConf());
     tool.setErrOut(new PrintStream(errOutBytes));
-    tool.setOut(new PrintStream(outBytes));
   }
 
   private void assertOutputContains(String string) {
-    if (!errOutput.contains(string) && !output.contains(string)) {
-      fail("Expected output to contain '" + string +
-          "' but err_output was:\n" + errOutput + 
-          "\n and output was: \n" + output);
+    if (!errOutput.contains(string)) {
+      fail("Expected output to contain '" + string + "' but was:\n" +
+          errOutput);
     }
   }
 
@@ -147,7 +143,7 @@ public void testNamenodeResolution() throws Exception {
 
   @Test
   public void testHelp() throws Exception {
-    assertEquals(0, runTool("-help"));
+    assertEquals(-1, runTool("-help"));
     assertEquals(0, runTool("-help", "transitionToActive"));
     assertOutputContains("Transitions the service into Active");
   }
@@ -382,12 +378,10 @@ public void testFencingConfigPerNameNode() throws Exception {
 
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
-    outBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
     int ret = tool.run(args);
     errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    output = new String(outBytes.toByteArray(), Charsets.UTF_8);
-    LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
+    LOG.info("Output:\n" + errOutput);
     return ret;
   }