HDFS-3723. Merging change r1373173 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1373185 13f79535-47bb-0310-9956-ffa450edef68
parent c738a9964b
commit 34e596611f

@@ -88,7 +88,7 @@ public abstract class HAAdmin extends Configured implements Tool {
 
 /** Output stream for errors, for use in tests */
 protected PrintStream errOut = System.err;
-PrintStream out = System.out;
+protected PrintStream out = System.out;
 private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
 
 protected abstract HAServiceTarget resolveTarget(String string);
@@ -439,7 +439,10 @@ public abstract class HAAdmin extends Configured implements Tool {
 }
 
 private int help(String[] argv) {
-if (argv.length != 2) {
+if (argv.length == 1) { // only -help
+printUsage(out);
+return 0;
+} else if (argv.length != 2) {
 printUsage(errOut, "-help");
 return -1;
 }
@@ -454,7 +457,7 @@ public abstract class HAAdmin extends Configured implements Tool {
 return -1;
 }
 
-errOut.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
+out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
 return 0;
 }
 

@@ -80,6 +80,8 @@ public abstract class ZKFailoverController {
 ZK_AUTH_KEY
 };
 
+protected static final String USAGE =
+"Usage: java zkfc [ -formatZK [-force] [-nonInteractive] ]";
 
 /** Unable to format the parent znode in ZK */
 static final int ERR_CODE_FORMAT_DENIED = 2;
@@ -248,8 +250,7 @@ public abstract class ZKFailoverController {
 }
 
 private void printUsage() {
-System.err.println("Usage: " + this.getClass().getSimpleName() +
-" [-formatZK [-force | -nonInteractive]]");
+System.err.println(USAGE + "\n");
 }
 
 private int formatZK(boolean force, boolean interactive)

@@ -40,7 +40,9 @@ public class TestHAAdmin {
 
 private HAAdmin tool;
 private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
 private String errOutput;
+private String output;
 
 @Before
 public void setup() throws IOException {
@@ -53,12 +55,14 @@ public class TestHAAdmin {
 };
 tool.setConf(new Configuration());
 tool.errOut = new PrintStream(errOutBytes);
+tool.out = new PrintStream(outBytes);
 }
 
 private void assertOutputContains(String string) {
-if (!errOutput.contains(string)) {
-fail("Expected output to contain '" + string + "' but was:\n" +
-errOutput);
+if (!errOutput.contains(string) && !output.contains(string)) {
+fail("Expected output to contain '" + string +
+"' but err_output was:\n" + errOutput +
+"\n and output was: \n" + output);
 }
 }
 
@@ -88,17 +92,19 @@ public class TestHAAdmin {
 
 @Test
 public void testHelp() throws Exception {
-assertEquals(-1, runTool("-help"));
+assertEquals(0, runTool("-help"));
 assertEquals(0, runTool("-help", "transitionToActive"));
 assertOutputContains("Transitions the service into Active");
 }
 
 private Object runTool(String ... args) throws Exception {
 errOutBytes.reset();
+outBytes.reset();
 LOG.info("Running: HAAdmin " + Joiner.on(" ").join(args));
 int ret = tool.run(args);
 errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-LOG.info("Output:\n" + errOutput);
+output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
 return ret;
 }
 }

@@ -203,6 +203,9 @@ Release 2.0.1-alpha - UNRELEASED
 HDFS-3765. namenode -initializeSharedEdits should be able to initialize
 all shared storages. (Vinay and todd via todd)
 
+HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+suresh)
+
 HDFS-3803. Change BlockPoolSliceScanner chatty INFO log to DEBUG.
 (Andrew Purtell via suresh)
 

@@ -18,8 +18,21 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -34,11 +47,18 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;
 
 import javax.net.SocketFactory;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -63,8 +83,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -430,7 +449,6 @@ public class DFSUtil {
 *
 * @param conf configuration
 * @return list of InetSocketAddresses
-* @throws IOException if no addresses are configured
 */
 public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
 Configuration conf) {
@@ -1079,4 +1097,44 @@ public class DFSUtil {
 return null;
 }
 }
+
+public static Options helpOptions = new Options();
+public static Option helpOpt = new Option("h", "help", false,
+"get help information");
+
+static {
+helpOptions.addOption(helpOpt);
+}
+
+/**
+* Parse the arguments for commands
+*
+* @param args the argument to be parsed
+* @param helpDescription help information to be printed out
+* @param out Printer
+* @param printGenericCommandUsage whether to print the
+* generic command usage defined in ToolRunner
+* @return true when the argument matches help option, false if not
+*/
+public static boolean parseHelpArgument(String[] args,
+String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
+if (args.length == 1) {
+try {
+CommandLineParser parser = new PosixParser();
+CommandLine cmdLine = parser.parse(helpOptions, args);
+if (cmdLine.hasOption(helpOpt.getOpt())
+|| cmdLine.hasOption(helpOpt.getLongOpt())) {
+// should print out the help information
+out.println(helpDescription + "\n");
+if (printGenericCommandUsage) {
+ToolRunner.printGenericCommandUsage(out);
+}
+return true;
+}
+} catch (ParseException pe) {
+return false;
+}
+}
+return false;
+}
 }

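Note: the DFSUtil.parseHelpArgument helper added above is what the remaining files in this patch call from their main() methods. A minimal sketch of a caller, assuming a hypothetical ExampleTool class and USAGE string (neither is part of this patch), would look like:

import org.apache.hadoop.hdfs.DFSUtil;

public class ExampleTool {
  // Hypothetical usage text; each real command in this patch defines its own USAGE constant.
  private static final String USAGE = "Usage: java ExampleTool [-someOption]";

  public static void main(String[] args) {
    // parseHelpArgument returns true only when the single argument is -h or -help;
    // it has then already printed USAGE (and the generic ToolRunner options) to System.out.
    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
      System.exit(0);
    }
    // normal argument parsing and tool execution would follow here
  }
}
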
@@ -17,6 +17,7 @@
 */
 package org.apache.hadoop.hdfs.server.balancer;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -26,6 +27,7 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.Socket;
 import java.net.URI;
 import java.text.DateFormat;
@@ -68,7 +70,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;
@@ -188,6 +189,13 @@ public class Balancer {
 */
 public static final int MAX_NUM_CONCURRENT_MOVES = 5;
 
+private static final String USAGE = "Usage: java "
++ Balancer.class.getSimpleName()
++ "\n\t[-policy <policy>]\tthe balancing policy: "
++ BalancingPolicy.Node.INSTANCE.getName() + " or "
++ BalancingPolicy.Pool.INSTANCE.getName()
++ "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
+
 private final NameNodeConnector nnc;
 private final BalancingPolicy policy;
 private final double threshold;
@@ -1548,7 +1556,7 @@
 }
 }
 } catch(RuntimeException e) {
-printUsage();
+printUsage(System.err);
 throw e;
 }
 }
@@ -1556,13 +1564,8 @@
 return new Parameters(policy, threshold);
 }
 
-private static void printUsage() {
-System.out.println("Usage: java " + Balancer.class.getSimpleName());
-System.out.println(" [-policy <policy>]\tthe balancing policy: "
-+ BalancingPolicy.Node.INSTANCE.getName() + " or "
-+ BalancingPolicy.Pool.INSTANCE.getName());
-System.out.println(
-" [-threshold <threshold>]\tPercentage of disk capacity");
+private static void printUsage(PrintStream out) {
+out.println(USAGE + "\n");
 }
 }
 
@@ -1571,6 +1574,10 @@
 * @param args Command line arguments
 */
 public static void main(String[] args) {
+if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+System.exit(0);
+}
+
 try {
 System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
 } catch (Throwable e) {

@@ -46,6 +46,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOUR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -55,6 +56,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -100,8 +102,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -126,9 +128,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -173,6 +172,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
@@ -234,6 +234,8 @@ public class DataNode extends Configured
 static final Log ClientTraceLog =
 LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
 
+private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
+
 /**
 * Use {@link NetUtils#createSocketAddr(String)} instead.
 */
@@ -1545,7 +1547,7 @@ public class DataNode extends Configured
 }
 
 if (!parseArguments(args, conf)) {
-printUsage();
+printUsage(System.err);
 return null;
 }
 Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1659,9 +1661,8 @@ public class DataNode extends Configured
 + xmitsInProgress.get() + "}";
 }
 
-private static void printUsage() {
-System.err.println("Usage: java DataNode");
-System.err.println(" [-rollback]");
+private static void printUsage(PrintStream out) {
+out.println(USAGE + "\n");
 }
 
 /**
@@ -1746,6 +1747,10 @@
 }
 
 public static void main(String args[]) {
+if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
+System.exit(0);
+}
+
 secureMain(args, null);
 }
 

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
@@ -41,6 +42,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -72,12 +75,9 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ExitUtil.ExitException;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -191,6 +191,22 @@ public class NameNode {
 DFS_HA_AUTO_FAILOVER_ENABLED_KEY
 };
 
+private static final String USAGE = "Usage: java NameNode ["
++ StartupOption.BACKUP.getName() + "] | ["
++ StartupOption.CHECKPOINT.getName() + "] | ["
++ StartupOption.FORMAT.getName() + " ["
++ StartupOption.CLUSTERID.getName() + " cid ] ["
++ StartupOption.FORCE.getName() + "] ["
++ StartupOption.NONINTERACTIVE.getName() + "] ] | ["
++ StartupOption.UPGRADE.getName() + "] | ["
++ StartupOption.ROLLBACK.getName() + "] | ["
++ StartupOption.FINALIZE.getName() + "] | ["
++ StartupOption.IMPORT.getName() + "] | ["
++ StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
++ StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
++ StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
++ " ] ]";
+
 public long getProtocolVersion(String protocol,
 long clientVersion) throws IOException {
 if (protocol.equals(ClientProtocol.class.getName())) {
@@ -943,25 +959,8 @@ public class NameNode {
 return false;
 }
 
-private static void printUsage() {
-System.err.println(
-"Usage: java NameNode [" +
-StartupOption.BACKUP.getName() + "] | [" +
-StartupOption.CHECKPOINT.getName() + "] | [" +
-StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +
-" cid ] [" + StartupOption.FORCE.getName() + "] [" +
-StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
-StartupOption.UPGRADE.getName() + "] | [" +
-StartupOption.ROLLBACK.getName() + "] | [" +
-StartupOption.FINALIZE.getName() + "] | [" +
-StartupOption.IMPORT.getName() + "] | [" +
-StartupOption.INITIALIZESHAREDEDITS.getName() +
-" [" + StartupOption.FORCE.getName() + "] [" +
-StartupOption.NONINTERACTIVE.getName() + "]" +
-"] | [" +
-StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
-StartupOption.RECOVER.getName() + " [ " +
-StartupOption.FORCE.getName() + " ] ]");
+private static void printUsage(PrintStream out) {
+out.println(USAGE + "\n");
 }
 
 private static StartupOption parseArguments(String args[]) {
@@ -1109,7 +1108,7 @@
 conf = new HdfsConfiguration();
 StartupOption startOpt = parseArguments(argv);
 if (startOpt == null) {
-printUsage();
+printUsage(System.err);
 return null;
 }
 setStartupOption(conf, startOpt);
@@ -1223,6 +1222,10 @@
 /**
 */
 public static void main(String argv[]) throws Exception {
+if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
+System.exit(0);
+}
+
 try {
 StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
 NameNode namenode = createNameNode(argv, null);

@@ -562,6 +562,9 @@ public class SecondaryNameNode implements Runnable {
 if (opts == null) {
 LOG.fatal("Failed to parse options");
 terminate(1);
+} else if (opts.shouldPrintHelp()) {
+opts.usage();
+System.exit(0);
 }
 
 StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -595,6 +598,7 @@ public class SecondaryNameNode implements Runnable {
 private final Option geteditsizeOpt;
 private final Option checkpointOpt;
 private final Option formatOpt;
+private final Option helpOpt;
 
 
 Command cmd;
@@ -605,6 +609,7 @@ public class SecondaryNameNode implements Runnable {
 
 private boolean shouldForce;
 private boolean shouldFormat;
+private boolean shouldPrintHelp;
 
 CommandLineOpts() {
 geteditsizeOpt = new Option("geteditsize",
@@ -612,20 +617,32 @@ public class SecondaryNameNode implements Runnable {
 checkpointOpt = OptionBuilder.withArgName("force")
 .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
 formatOpt = new Option("format", "format the local storage during startup");
+helpOpt = new Option("h", "help", false, "get help information");
 
 options.addOption(geteditsizeOpt);
 options.addOption(checkpointOpt);
 options.addOption(formatOpt);
+options.addOption(helpOpt);
 }
 
 public boolean shouldFormat() {
 return shouldFormat;
 }
 
+public boolean shouldPrintHelp() {
+return shouldPrintHelp;
+}
+
 public void parse(String ... argv) throws ParseException {
 CommandLineParser parser = new PosixParser();
 CommandLine cmdLine = parser.parse(options, argv);
 
+if (cmdLine.hasOption(helpOpt.getOpt())
+|| cmdLine.hasOption(helpOpt.getLongOpt())) {
+shouldPrintHelp = true;
+return;
+}
+
 boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
 boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
 if (hasGetEdit && hasCheckpoint) {
@@ -662,8 +679,13 @@ public class SecondaryNameNode implements Runnable {
 }
 
 void usage() {
+String header = "The Secondary NameNode is a helper "
++ "to the primary NameNode. The Secondary is responsible "
++ "for supporting periodic checkpoints of the HDFS metadata. "
++ "The current design allows only one Secondary NameNode "
++ "per HDFS cluster.";
 HelpFormatter formatter = new HelpFormatter();
-formatter.printHelp("secondarynamenode", options);
+formatter.printHelp("secondarynamenode", header, options, "", false);
 }
 }
 

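SecondaryNameNode (above) takes a different route from DFSUtil.parseHelpArgument: it registers -h/--help as a commons-cli option next to its existing ones and sets a shouldPrintHelp flag during parse(). A minimal standalone sketch of that check, assuming only the help option is registered (the real CommandLineOpts also has geteditsize, checkpoint and format), is:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;

class HelpFlagSketch {
  static boolean wantsHelp(String[] argv) throws ParseException {
    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "get help information");
    options.addOption(helpOpt);
    CommandLine cmdLine = new PosixParser().parse(options, argv);
    // accept both the short (-h) and long (--help) spellings
    return cmdLine.hasOption(helpOpt.getOpt())
        || cmdLine.hasOption(helpOpt.getLongOpt());
  }
}
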
@@ -43,6 +43,10 @@ public class DFSHAAdmin extends HAAdmin {
 this.errOut = errOut;
 }
 
+protected void setOut(PrintStream out) {
+this.out = out;
+}
+
 @Override
 public void setConf(Configuration conf) {
 if (conf != null) {

@@ -162,6 +162,10 @@ public class DFSZKFailoverController extends ZKFailoverController {
 
 public static void main(String args[])
 throws Exception {
+if (DFSUtil.parseHelpArgument(args,
+ZKFailoverController.USAGE, System.out, true)) {
+System.exit(0);
+}
+
 GenericOptionsParser parser = new GenericOptionsParser(
 new HdfsConfiguration(), args);

@@ -73,6 +73,25 @@ public class DFSck extends Configured implements Tool {
 HdfsConfiguration.init();
 }
 
+private static final String USAGE = "Usage: DFSck <path> "
++ "[-list-corruptfileblocks | "
++ "[-move | -delete | -openforwrite] "
++ "[-files [-blocks [-locations | -racks]]]]\n"
++ "\t<path>\tstart checking from this path\n"
++ "\t-move\tmove corrupted files to /lost+found\n"
++ "\t-delete\tdelete corrupted files\n"
++ "\t-files\tprint out files being checked\n"
++ "\t-openforwrite\tprint out files opened for write\n"
++ "\t-list-corruptfileblocks\tprint out list of missing "
++ "blocks and files they belong to\n"
++ "\t-blocks\tprint out block report\n"
++ "\t-locations\tprint out locations for every block\n"
++ "\t-racks\tprint out network topology for data-node locations\n"
++ "\t\tBy default fsck ignores files opened for write, "
++ "use -openforwrite to report such files. They are usually "
++ " tagged CORRUPT or HEALTHY depending on their block "
++ "allocation status";
+
 private final UserGroupInformation ugi;
 private final PrintStream out;
 
@@ -93,25 +112,9 @@ public class DFSck extends Configured implements Tool {
 /**
 * Print fsck usage information
 */
-static void printUsage() {
-System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
-"[-move | -delete | -openforwrite] " +
-"[-files [-blocks [-locations | -racks]]]]");
-System.err.println("\t<path>\tstart checking from this path");
-System.err.println("\t-move\tmove corrupted files to /lost+found");
-System.err.println("\t-delete\tdelete corrupted files");
-System.err.println("\t-files\tprint out files being checked");
-System.err.println("\t-openforwrite\tprint out files opened for write");
-System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
-+ "blocks and files they belong to");
-System.err.println("\t-blocks\tprint out block report");
-System.err.println("\t-locations\tprint out locations for every block");
-System.err.println("\t-racks\tprint out network topology for data-node locations");
-System.err.println("\t\tBy default fsck ignores files opened for write, " +
-"use -openforwrite to report such files. They are usually " +
-" tagged CORRUPT or HEALTHY depending on their block " +
-"allocation status");
-ToolRunner.printGenericCommandUsage(System.err);
+static void printUsage(PrintStream out) {
+out.println(USAGE + "\n");
+ToolRunner.printGenericCommandUsage(out);
 }
 /**
 * @param args
@@ -119,7 +122,7 @@ public class DFSck extends Configured implements Tool {
 @Override
 public int run(final String[] args) throws IOException {
 if (args.length == 0) {
-printUsage();
+printUsage(System.err);
 return -1;
 }
 
@@ -258,12 +261,12 @@ public class DFSck extends Configured implements Tool {
 } else {
 System.err.println("fsck: can only operate on one path at a time '"
 + args[idx] + "'");
-printUsage();
+printUsage(System.err);
 return -1;
 }
 } else {
 System.err.println("fsck: Illegal option '" + args[idx] + "'");
-printUsage();
+printUsage(System.err);
 return -1;
 }
 }
@@ -304,10 +307,14 @@ public class DFSck extends Configured implements Tool {
 // -files option is also used by GenericOptionsParser
 // Make sure that is not the first argument for fsck
 int res = -1;
-if ((args.length == 0 ) || ("-files".equals(args[0])))
-printUsage();
-else
+if ((args.length == 0) || ("-files".equals(args[0]))) {
+printUsage(System.err);
+ToolRunner.printGenericCommandUsage(System.err);
+} else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+res = 0;
+} else {
 res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
+}
 System.exit(res);
 }
 }

@@ -40,7 +40,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -48,9 +47,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -71,8 +68,10 @@ public class DelegationTokenFetcher {
 private static final String CANCEL = "cancel";
 private static final String RENEW = "renew";
 private static final String PRINT = "print";
+private static final String HELP = "help";
+private static final String HELP_SHORT = "h";
 
-private static void printUsage(PrintStream err) throws IOException {
+private static void printUsage(PrintStream err) {
 err.println("fetchdt retrieves delegation tokens from the NameNode");
 err.println();
 err.println("fetchdt <opts> <token file>");
@@ -107,6 +106,7 @@ public class DelegationTokenFetcher {
 fetcherOptions.addOption(CANCEL, false, "cancel the token");
 fetcherOptions.addOption(RENEW, false, "renew the token");
 fetcherOptions.addOption(PRINT, false, "print the token");
+fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
 GenericOptionsParser parser = new GenericOptionsParser(conf,
 fetcherOptions, args);
 CommandLine cmd = parser.getCommandLine();
@@ -119,9 +119,14 @@ public class DelegationTokenFetcher {
 final boolean cancel = cmd.hasOption(CANCEL);
 final boolean renew = cmd.hasOption(RENEW);
 final boolean print = cmd.hasOption(PRINT);
+final boolean help = cmd.hasOption(HELP);
 String[] remaining = parser.getRemainingArgs();
 
 // check option validity
+if (help) {
+printUsage(System.out);
+System.exit(0);
+}
 if (cancel && renew || cancel && print || renew && print || cancel && renew
 && print) {
 System.err.println("ERROR: Only specify cancel, renew or print.");

@@ -324,6 +324,10 @@ public class GetConf extends Configured implements Tool {
 }
 
 public static void main(String[] args) throws Exception {
+if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+System.exit(0);
+}
+
 int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
 System.exit(res);
 }

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -44,6 +45,8 @@ public class GetGroups extends GetGroupsBase {
 
 private static final Log LOG = LogFactory.getLog(GetGroups.class);
 
+static final String USAGE = "Usage: hdfs groups [username ...]";
+
 static{
 HdfsConfiguration.init();
 }
@@ -86,6 +89,10 @@ public class GetGroups extends GetGroupsBase {
 }
 
 public static void main(String[] argv) throws Exception {
+if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+System.exit(0);
+}
+
 int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
 System.exit(res);
 }

@@ -55,7 +55,9 @@ public class TestDFSHAAdmin {
 
 private DFSHAAdmin tool;
 private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
 private String errOutput;
+private String output;
 private HAServiceProtocol mockProtocol;
 private ZKFCProtocol mockZkfcProtocol;
 
@@ -111,12 +113,14 @@ public class TestDFSHAAdmin {
 };
 tool.setConf(getHAConf());
 tool.setErrOut(new PrintStream(errOutBytes));
+tool.setOut(new PrintStream(outBytes));
 }
 
 private void assertOutputContains(String string) {
-if (!errOutput.contains(string)) {
-fail("Expected output to contain '" + string + "' but was:\n" +
-errOutput);
+if (!errOutput.contains(string) && !output.contains(string)) {
+fail("Expected output to contain '" + string +
+"' but err_output was:\n" + errOutput +
+"\n and output was: \n" + output);
 }
 }
 
@@ -143,7 +147,7 @@ public class TestDFSHAAdmin {
 
 @Test
 public void testHelp() throws Exception {
-assertEquals(-1, runTool("-help"));
+assertEquals(0, runTool("-help"));
 assertEquals(0, runTool("-help", "transitionToActive"));
 assertOutputContains("Transitions the service into Active");
 }
@@ -378,10 +382,12 @@ public class TestDFSHAAdmin {
 
 private Object runTool(String ... args) throws Exception {
 errOutBytes.reset();
+outBytes.reset();
 LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
 int ret = tool.run(args);
 errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-LOG.info("Output:\n" + errOutput);
+output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
 return ret;
 }
 