HDFS-3723. Add support -h, -help to all the commands. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1373170 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-08-15 00:03:19 +00:00
parent f98d8eb291
commit f026d8bb1b
13 changed files with 216 additions and 83 deletions

View File

@@ -114,6 +114,9 @@ Trunk (unreleased changes)
HDFS-3789. JournalManager#format() should be able to throw IOException
(Ivan Kelly via todd)
+ HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+ suresh)
OPTIMIZATIONS
BUG FIXES

View File

@@ -18,8 +18,21 @@
package org.apache.hadoop.hdfs;
- import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import java.io.IOException;
+ import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.URI;
@@ -33,10 +46,17 @@
import java.util.Map;
import java.util.Random;
import java.util.Set;
+ import java.util.StringTokenizer;
import javax.net.SocketFactory;
+ import org.apache.commons.cli.CommandLine;
+ import org.apache.commons.cli.CommandLineParser;
+ import org.apache.commons.cli.Option;
+ import org.apache.commons.cli.Options;
+ import org.apache.commons.cli.ParseException;
+ import org.apache.commons.cli.PosixParser;
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -57,8 +77,7 @@
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
+ import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
@@ -424,7 +443,6 @@ private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
*
* @param conf configuration
* @return list of InetSocketAddresses
- * @throws IOException if no addresses are configured
*/
public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
Configuration conf) {
@@ -1073,4 +1091,44 @@ public static String getOnlyNameServiceIdOrNull(Configuration conf) {
return null;
}
}
+ public static Options helpOptions = new Options();
+ public static Option helpOpt = new Option("h", "help", false,
+ "get help information");
+ static {
+ helpOptions.addOption(helpOpt);
+ }
+ /**
+ * Parse the arguments for commands
+ *
+ * @param args the argument to be parsed
+ * @param helpDescription help information to be printed out
+ * @param out Printer
+ * @param printGenericCommandUsage whether to print the
+ * generic command usage defined in ToolRunner
+ * @return true when the argument matches help option, false if not
+ */
+ public static boolean parseHelpArgument(String[] args,
+ String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
+ if (args.length == 1) {
+ try {
+ CommandLineParser parser = new PosixParser();
+ CommandLine cmdLine = parser.parse(helpOptions, args);
+ if (cmdLine.hasOption(helpOpt.getOpt())
+ || cmdLine.hasOption(helpOpt.getLongOpt())) {
+ // should print out the help information
+ out.println(helpDescription + "\n");
+ if (printGenericCommandUsage) {
+ ToolRunner.printGenericCommandUsage(out);
+ }
+ return true;
+ }
+ } catch (ParseException pe) {
+ return false;
+ }
+ }
+ return false;
+ }
}
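The helper above is what every command touched by this commit delegates to. A minimal, self-contained sketch of the calling pattern, assuming a hypothetical MyTool command (the real call sites, such as Balancer, DataNode and NameNode, appear in the files below):

// Hypothetical example only: MyTool is not part of this commit. It mirrors how
// Balancer.main() and the other commands below wire in DFSUtil.parseHelpArgument.
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class MyTool extends Configured implements Tool {
  private static final String USAGE = "Usage: java MyTool [-someOption]";

  @Override
  public int run(String[] args) throws Exception {
    return 0; // the command's real work would go here
  }

  public static void main(String[] args) throws Exception {
    // A single -h or -help argument prints USAGE (plus the generic ToolRunner
    // options) and exits with status 0; anything else falls through to the
    // command's normal argument parsing.
    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
      System.exit(0);
    }
    System.exit(ToolRunner.run(new HdfsConfiguration(), new MyTool(), args));
  }
}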

View File

@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.balancer;
+ import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
import java.io.BufferedInputStream;
@@ -26,6 +27,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+ import java.io.PrintStream;
import java.net.Socket;
import java.net.URI;
import java.text.DateFormat;
@@ -68,7 +70,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
- import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.io.IOUtils;
@@ -79,7 +80,6 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
- import static com.google.common.base.Preconditions.checkArgument;
/** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
* when some datanodes become full or when new empty nodes join the cluster.
@@ -189,6 +189,13 @@ public class Balancer {
*/
public static final int MAX_NUM_CONCURRENT_MOVES = 5;
+ private static final String USAGE = "Usage: java "
+ + Balancer.class.getSimpleName()
+ + "\n\t[-policy <policy>]\tthe balancing policy: "
+ + BalancingPolicy.Node.INSTANCE.getName() + " or "
+ + BalancingPolicy.Pool.INSTANCE.getName()
+ + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
private final NameNodeConnector nnc;
private final BalancingPolicy policy;
private final double threshold;
@@ -1550,7 +1557,7 @@ static Parameters parse(String[] args) {
}
}
} catch(RuntimeException e) {
- printUsage();
+ printUsage(System.err);
throw e;
}
}
@@ -1558,13 +1565,8 @@ static Parameters parse(String[] args) {
return new Parameters(policy, threshold);
}
- private static void printUsage() {
- System.out.println("Usage: java " + Balancer.class.getSimpleName());
- System.out.println(" [-policy <policy>]\tthe balancing policy: "
- + BalancingPolicy.Node.INSTANCE.getName() + " or "
- + BalancingPolicy.Pool.INSTANCE.getName());
- System.out.println(
- " [-threshold <threshold>]\tPercentage of disk capacity");
+ private static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
}
}
@@ -1573,6 +1575,10 @@ private static void printUsage() {
* @param args Command line arguments
*/
public static void main(String[] args) {
+ if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+ System.exit(0);
+ }
try {
System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
} catch (Throwable e) {

View File

@@ -46,6 +46,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
+ import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
@@ -55,6 +56,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+ import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
@@ -98,8 +100,8 @@
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
- import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
+ import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -124,9 +126,6 @@
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
- import static org.apache.hadoop.util.ExitUtil.terminate;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -171,9 +170,9 @@
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
+ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
- import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**********************************************************
@@ -231,6 +230,8 @@ public class DataNode extends Configured
static final Log ClientTraceLog =
LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+ private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
/**
* Use {@link NetUtils#createSocketAddr(String)} instead.
*/
@@ -1541,7 +1542,7 @@ public static DataNode instantiateDataNode(String args [], Configuration conf,
}
if (!parseArguments(args, conf)) {
- printUsage();
+ printUsage(System.err);
return null;
}
Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1655,9 +1656,8 @@ public String toString() {
+ xmitsInProgress.get() + "}";
}
- private static void printUsage() {
- System.err.println("Usage: java DataNode");
- System.err.println(" [-rollback]");
+ private static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
}
/**
@@ -1742,6 +1742,10 @@ public static void secureMain(String args[], SecureResources resources) {
}
public static void main(String args[]) {
+ if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
+ System.exit(0);
+ }
secureMain(args, null);
}

View File

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
+ import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
@@ -38,6 +39,8 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Trash;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+ import static org.apache.hadoop.util.ExitUtil.terminate;
+ import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -69,12 +72,9 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
+ import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
- import org.apache.hadoop.util.ExitUtil.ExitException;
- import static org.apache.hadoop.util.ExitUtil.terminate;
- import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -188,6 +188,22 @@ public static enum OperationCategory {
DFS_HA_AUTO_FAILOVER_ENABLED_KEY
};
+ private static final String USAGE = "Usage: java NameNode ["
+ + StartupOption.BACKUP.getName() + "] | ["
+ + StartupOption.CHECKPOINT.getName() + "] | ["
+ + StartupOption.FORMAT.getName() + " ["
+ + StartupOption.CLUSTERID.getName() + " cid ] ["
+ + StartupOption.FORCE.getName() + "] ["
+ + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+ + StartupOption.UPGRADE.getName() + "] | ["
+ + StartupOption.ROLLBACK.getName() + "] | ["
+ + StartupOption.FINALIZE.getName() + "] | ["
+ + StartupOption.IMPORT.getName() + "] | ["
+ + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
+ + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
+ + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
+ + " ] ]";
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
@@ -895,25 +911,8 @@ private static boolean finalize(Configuration conf,
return false;
}
- private static void printUsage() {
- System.err.println(
- "Usage: java NameNode [" +
- StartupOption.BACKUP.getName() + "] | [" +
- StartupOption.CHECKPOINT.getName() + "] | [" +
- StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +
- " cid ] [" + StartupOption.FORCE.getName() + "] [" +
- StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
- StartupOption.UPGRADE.getName() + "] | [" +
- StartupOption.ROLLBACK.getName() + "] | [" +
- StartupOption.FINALIZE.getName() + "] | [" +
- StartupOption.IMPORT.getName() + "] | [" +
- StartupOption.INITIALIZESHAREDEDITS.getName() +
- " [" + StartupOption.FORCE.getName() + "] [" +
- StartupOption.NONINTERACTIVE.getName() + "]" +
- "] | [" +
- StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
- StartupOption.RECOVER.getName() + " [ " +
- StartupOption.FORCE.getName() + " ] ]");
+ private static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
}
private static StartupOption parseArguments(String args[]) {
@@ -1061,7 +1060,7 @@ public static NameNode createNameNode(String argv[], Configuration conf)
conf = new HdfsConfiguration();
StartupOption startOpt = parseArguments(argv);
if (startOpt == null) {
- printUsage();
+ printUsage(System.err);
return null;
}
setStartupOption(conf, startOpt);
@@ -1175,6 +1174,10 @@ protected String getNameServiceId(Configuration conf) {
/**
*/
public static void main(String argv[]) throws Exception {
+ if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
+ System.exit(0);
+ }
try {
StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
NameNode namenode = createNameNode(argv, null);

View File

@@ -562,6 +562,9 @@ public static void main(String[] argv) throws Exception {
if (opts == null) {
LOG.fatal("Failed to parse options");
terminate(1);
+ } else if (opts.shouldPrintHelp()) {
+ opts.usage();
+ System.exit(0);
}
StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -595,6 +598,7 @@ static class CommandLineOpts {
private final Option geteditsizeOpt;
private final Option checkpointOpt;
private final Option formatOpt;
+ private final Option helpOpt;
Command cmd;
@@ -605,6 +609,7 @@ enum Command {
private boolean shouldForce;
private boolean shouldFormat;
+ private boolean shouldPrintHelp;
CommandLineOpts() {
geteditsizeOpt = new Option("geteditsize",
@@ -612,20 +617,32 @@ enum Command {
checkpointOpt = OptionBuilder.withArgName("force")
.hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
formatOpt = new Option("format", "format the local storage during startup");
+ helpOpt = new Option("h", "help", false, "get help information");
options.addOption(geteditsizeOpt);
options.addOption(checkpointOpt);
options.addOption(formatOpt);
+ options.addOption(helpOpt);
}
public boolean shouldFormat() {
return shouldFormat;
}
+ public boolean shouldPrintHelp() {
+ return shouldPrintHelp;
+ }
public void parse(String ... argv) throws ParseException {
CommandLineParser parser = new PosixParser();
CommandLine cmdLine = parser.parse(options, argv);
+ if (cmdLine.hasOption(helpOpt.getOpt())
+ || cmdLine.hasOption(helpOpt.getLongOpt())) {
+ shouldPrintHelp = true;
+ return;
+ }
boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
if (hasGetEdit && hasCheckpoint) {
@@ -662,8 +679,13 @@ public boolean shouldForceCheckpoint() {
}
void usage() {
+ String header = "The Secondary NameNode is a helper "
+ + "to the primary NameNode. The Secondary is responsible "
+ + "for supporting periodic checkpoints of the HDFS metadata. "
+ + "The current design allows only one Secondary NameNode "
+ + "per HDFS cluster.";
HelpFormatter formatter = new HelpFormatter();
- formatter.printHelp("secondarynamenode", options);
+ formatter.printHelp("secondarynamenode", header, options, "", false);
}
}

View File

@@ -43,6 +43,10 @@ protected void setErrOut(PrintStream errOut) {
this.errOut = errOut;
}
+ protected void setOut(PrintStream out) {
+ this.out = out;
+ }
@Override
public void setConf(Configuration conf) {
if (conf != null) {

View File

@@ -162,6 +162,10 @@ protected String getScopeInsideParentNode() {
public static void main(String args[])
throws Exception {
+ if (DFSUtil.parseHelpArgument(args,
+ ZKFailoverController.USAGE, System.out, true)) {
+ System.exit(0);
+ }
GenericOptionsParser parser = new GenericOptionsParser(
new HdfsConfiguration(), args);

View File

@@ -73,6 +73,25 @@ public class DFSck extends Configured implements Tool {
HdfsConfiguration.init();
}
+ private static final String USAGE = "Usage: DFSck <path> "
+ + "[-list-corruptfileblocks | "
+ + "[-move | -delete | -openforwrite] "
+ + "[-files [-blocks [-locations | -racks]]]]\n"
+ + "\t<path>\tstart checking from this path\n"
+ + "\t-move\tmove corrupted files to /lost+found\n"
+ + "\t-delete\tdelete corrupted files\n"
+ + "\t-files\tprint out files being checked\n"
+ + "\t-openforwrite\tprint out files opened for write\n"
+ + "\t-list-corruptfileblocks\tprint out list of missing "
+ + "blocks and files they belong to\n"
+ + "\t-blocks\tprint out block report\n"
+ + "\t-locations\tprint out locations for every block\n"
+ + "\t-racks\tprint out network topology for data-node locations\n"
+ + "\t\tBy default fsck ignores files opened for write, "
+ + "use -openforwrite to report such files. They are usually "
+ + " tagged CORRUPT or HEALTHY depending on their block "
+ + "allocation status";
private final UserGroupInformation ugi;
private final PrintStream out;
@@ -93,25 +112,9 @@ public DFSck(Configuration conf, PrintStream out) throws IOException {
/**
* Print fsck usage information
*/
- static void printUsage() {
- System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
- "[-move | -delete | -openforwrite] " +
- "[-files [-blocks [-locations | -racks]]]]");
- System.err.println("\t<path>\tstart checking from this path");
- System.err.println("\t-move\tmove corrupted files to /lost+found");
- System.err.println("\t-delete\tdelete corrupted files");
- System.err.println("\t-files\tprint out files being checked");
- System.err.println("\t-openforwrite\tprint out files opened for write");
- System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
- + "blocks and files they belong to");
- System.err.println("\t-blocks\tprint out block report");
- System.err.println("\t-locations\tprint out locations for every block");
- System.err.println("\t-racks\tprint out network topology for data-node locations");
- System.err.println("\t\tBy default fsck ignores files opened for write, " +
- "use -openforwrite to report such files. They are usually " +
- " tagged CORRUPT or HEALTHY depending on their block " +
- "allocation status");
- ToolRunner.printGenericCommandUsage(System.err);
+ static void printUsage(PrintStream out) {
+ out.println(USAGE + "\n");
+ ToolRunner.printGenericCommandUsage(out);
}
/**
* @param args
@@ -119,7 +122,7 @@ static void printUsage() {
@Override
public int run(final String[] args) throws IOException {
if (args.length == 0) {
- printUsage();
+ printUsage(System.err);
return -1;
}
@@ -258,12 +261,12 @@ else if (args[idx].equals("-list-corruptfileblocks")) {
} else {
System.err.println("fsck: can only operate on one path at a time '"
+ args[idx] + "'");
- printUsage();
+ printUsage(System.err);
return -1;
}
} else {
System.err.println("fsck: Illegal option '" + args[idx] + "'");
- printUsage();
+ printUsage(System.err);
return -1;
}
}
@@ -304,10 +307,14 @@ public static void main(String[] args) throws Exception {
// -files option is also used by GenericOptionsParser
// Make sure that is not the first argument for fsck
int res = -1;
- if ((args.length == 0 ) || ("-files".equals(args[0])))
- printUsage();
- else
+ if ((args.length == 0) || ("-files".equals(args[0]))) {
+ printUsage(System.err);
+ ToolRunner.printGenericCommandUsage(System.err);
+ } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+ res = 0;
+ } else {
res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
+ }
System.exit(res);
}
}

View File

@@ -40,7 +40,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -48,9 +47,7 @@
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
- import org.apache.hadoop.hdfs.web.URLUtils;
import org.apache.hadoop.io.IOUtils;
- import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
@@ -71,8 +68,10 @@ public class DelegationTokenFetcher {
private static final String CANCEL = "cancel";
private static final String RENEW = "renew";
private static final String PRINT = "print";
+ private static final String HELP = "help";
+ private static final String HELP_SHORT = "h";
- private static void printUsage(PrintStream err) throws IOException {
+ private static void printUsage(PrintStream err) {
err.println("fetchdt retrieves delegation tokens from the NameNode");
err.println();
err.println("fetchdt <opts> <token file>");
@@ -107,6 +106,7 @@ public static void main(final String[] args) throws Exception {
fetcherOptions.addOption(CANCEL, false, "cancel the token");
fetcherOptions.addOption(RENEW, false, "renew the token");
fetcherOptions.addOption(PRINT, false, "print the token");
+ fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
GenericOptionsParser parser = new GenericOptionsParser(conf,
fetcherOptions, args);
CommandLine cmd = parser.getCommandLine();
@@ -119,9 +119,14 @@ public static void main(final String[] args) throws Exception {
final boolean cancel = cmd.hasOption(CANCEL);
final boolean renew = cmd.hasOption(RENEW);
final boolean print = cmd.hasOption(PRINT);
+ final boolean help = cmd.hasOption(HELP);
String[] remaining = parser.getRemainingArgs();
// check option validity
+ if (help) {
+ printUsage(System.out);
+ System.exit(0);
+ }
if (cancel && renew || cancel && print || renew && print || cancel && renew
&& print) {
System.err.println("ERROR: Only specify cancel, renew or print.");

View File

@@ -324,6 +324,10 @@ public Integer run() throws Exception {
}
public static void main(String[] args) throws Exception {
+ if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+ System.exit(0);
+ }
int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
System.exit(res);
}

View File

@@ -28,6 +28,7 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -44,6 +45,8 @@ public class GetGroups extends GetGroupsBase {
private static final Log LOG = LogFactory.getLog(GetGroups.class);
+ static final String USAGE = "Usage: hdfs groups [username ...]";
static{
HdfsConfiguration.init();
}
@@ -86,6 +89,10 @@ protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
}
public static void main(String[] argv) throws Exception {
+ if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+ System.exit(0);
+ }
int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
System.exit(res);
}

View File

@@ -55,7 +55,9 @@ public class TestDFSHAAdmin {
private DFSHAAdmin tool;
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+ private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
private String errOutput;
+ private String output;
private HAServiceProtocol mockProtocol;
private ZKFCProtocol mockZkfcProtocol;
@@ -111,12 +113,14 @@ protected HAServiceTarget resolveTarget(String nnId) {
};
tool.setConf(getHAConf());
tool.setErrOut(new PrintStream(errOutBytes));
+ tool.setOut(new PrintStream(outBytes));
}
private void assertOutputContains(String string) {
- if (!errOutput.contains(string)) {
- fail("Expected output to contain '" + string + "' but was:\n" +
- errOutput);
+ if (!errOutput.contains(string) && !output.contains(string)) {
+ fail("Expected output to contain '" + string +
+ "' but err_output was:\n" + errOutput +
+ "\n and output was: \n" + output);
}
}
@@ -143,7 +147,7 @@ public void testNamenodeResolution() throws Exception {
@Test
public void testHelp() throws Exception {
- assertEquals(-1, runTool("-help"));
+ assertEquals(0, runTool("-help"));
assertEquals(0, runTool("-help", "transitionToActive"));
assertOutputContains("Transitions the service into Active");
}
@@ -378,10 +382,12 @@ public void testFencingConfigPerNameNode() throws Exception {
private Object runTool(String ... args) throws Exception {
errOutBytes.reset();
+ outBytes.reset();
LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
int ret = tool.run(args);
errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
- LOG.info("Output:\n" + errOutput);
+ output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+ LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
return ret;
}