Reverting previous incomplete change r1373170 for HDFS-3723
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1373172 13f79535-47bb-0310-9956-ffa450edef68
commit 231a52a7df
parent f026d8bb1b
@@ -114,9 +114,6 @@ Trunk (unreleased changes)
     HDFS-3789. JournalManager#format() should be able to throw IOException
     (Ivan Kelly via todd)
 
-    HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
-    suresh)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -18,21 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -46,17 +33,10 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +57,8 @@
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -443,6 +424,7 @@ private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
    *
    * @param conf configuration
    * @return list of InetSocketAddresses
+   * @throws IOException if no addresses are configured
    */
   public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
       Configuration conf) {
@@ -1091,44 +1073,4 @@ public static String getOnlyNameServiceIdOrNull(Configuration conf) {
       return null;
     }
   }
-
-  public static Options helpOptions = new Options();
-  public static Option helpOpt = new Option("h", "help", false,
-      "get help information");
-
-  static {
-    helpOptions.addOption(helpOpt);
-  }
-
-  /**
-   * Parse the arguments for commands
-   *
-   * @param args the argument to be parsed
-   * @param helpDescription help information to be printed out
-   * @param out Printer
-   * @param printGenericCommandUsage whether to print the
-   *        generic command usage defined in ToolRunner
-   * @return true when the argument matches help option, false if not
-   */
-  public static boolean parseHelpArgument(String[] args,
-      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
-    if (args.length == 1) {
-      try {
-        CommandLineParser parser = new PosixParser();
-        CommandLine cmdLine = parser.parse(helpOptions, args);
-        if (cmdLine.hasOption(helpOpt.getOpt())
-            || cmdLine.hasOption(helpOpt.getLongOpt())) {
-          // should print out the help information
-          out.println(helpDescription + "\n");
-          if (printGenericCommandUsage) {
-            ToolRunner.printGenericCommandUsage(out);
-          }
-          return true;
-        }
-      } catch (ParseException pe) {
-        return false;
-      }
-    }
-    return false;
-  }
 }
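The forty lines dropped from DFSUtil above are the shared help-argument parser that HDFS-3723 had introduced; every command's main() delegated its -h/--help handling to it. For reference, a self-contained sketch of the same commons-cli pattern, assuming commons-cli 1.x on the classpath (the class name and the simplified signature here are illustrative, not Hadoop API):

import java.io.PrintStream;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;

public class HelpArgDemo {
  static final Options HELP_OPTIONS = new Options();
  static final Option HELP_OPT =
      new Option("h", "help", false, "get help information");

  static {
    HELP_OPTIONS.addOption(HELP_OPT);
  }

  // Returns true iff the single argument is -h/--help; prints the usage.
  static boolean parseHelpArgument(String[] args, String usage, PrintStream out) {
    if (args.length != 1) {
      return false;
    }
    try {
      CommandLineParser parser = new PosixParser();
      CommandLine cmdLine = parser.parse(HELP_OPTIONS, args);
      if (cmdLine.hasOption(HELP_OPT.getOpt())
          || cmdLine.hasOption(HELP_OPT.getLongOpt())) {
        out.println(usage);
        return true;
      }
    } catch (ParseException pe) {
      return false;  // unrecognized option: not a help request
    }
    return false;
  }

  public static void main(String[] args) {
    if (parseHelpArgument(args, "Usage: java HelpArgDemo", System.out)) {
      System.exit(0);
    }
    System.out.println("normal startup");
  }
}

Run with -h or --help it prints the usage string and exits; any other argument falls through to normal startup.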
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -27,7 +26,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.PrintStream;
 import java.net.Socket;
 import java.net.URI;
 import java.text.DateFormat;
@@ -70,6 +68,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;
@@ -80,6 +79,7 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import static com.google.common.base.Preconditions.checkArgument;
 
 /** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
  * when some datanodes become full or when new empty nodes join the cluster.
@@ -189,13 +189,6 @@ public class Balancer {
    */
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
 
-  private static final String USAGE = "Usage: java "
-      + Balancer.class.getSimpleName()
-      + "\n\t[-policy <policy>]\tthe balancing policy: "
-      + BalancingPolicy.Node.INSTANCE.getName() + " or "
-      + BalancingPolicy.Pool.INSTANCE.getName()
-      + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
-
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
   private final double threshold;
@@ -1557,7 +1550,7 @@ static Parameters parse(String[] args) {
         }
       }
     } catch(RuntimeException e) {
-      printUsage(System.err);
+      printUsage();
       throw e;
     }
   }
@@ -1565,8 +1558,13 @@ static Parameters parse(String[] args) {
       return new Parameters(policy, threshold);
     }
 
-    private static void printUsage(PrintStream out) {
-      out.println(USAGE + "\n");
+    private static void printUsage() {
+      System.out.println("Usage: java " + Balancer.class.getSimpleName());
+      System.out.println(" [-policy <policy>]\tthe balancing policy: "
+          + BalancingPolicy.Node.INSTANCE.getName() + " or "
+          + BalancingPolicy.Pool.INSTANCE.getName());
+      System.out.println(
+          " [-threshold <threshold>]\tPercentage of disk capacity");
     }
   }
 
@@ -1575,10 +1573,6 @@ private static void printUsage(PrintStream out) {
    * @param args Command line arguments
    */
   public static void main(String[] args) {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
    try {
       System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {
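On both sides of the Balancer hunks the usage text ends up on a console stream; the difference is that the pre-revert version kept a single USAGE constant and let callers pick the stream (System.err on a bad command line, System.out for -help), which is also what makes usage output capturable in tests. A minimal illustration of that shape (hypothetical class, not the Balancer itself):

import java.io.PrintStream;

public class UsageDemo {
  // One usage string, printed to whichever stream the caller chooses.
  private static final String USAGE =
      "Usage: java UsageDemo [-threshold <threshold>]";

  private static void printUsage(PrintStream out) {
    out.println(USAGE + "\n");
  }

  public static void main(String[] args) {
    if (args.length == 0) {
      printUsage(System.err);   // error path: usage goes to stderr
      System.exit(-1);
    }
    printUsage(System.out);     // help path: usage goes to stdout
  }
}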
@@ -46,7 +46,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -56,7 +55,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -100,8 +98,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -126,6 +124,9 @@
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -170,9 +171,9 @@
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 /**********************************************************
@@ -230,8 +231,6 @@ public class DataNode extends Configured
   static final Log ClientTraceLog =
     LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
 
-  private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
-
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
    */
@@ -1542,7 +1541,7 @@ public static DataNode instantiateDataNode(String args [], Configuration conf,
     }
 
     if (!parseArguments(args, conf)) {
-      printUsage(System.err);
+      printUsage();
       return null;
     }
     Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1656,8 +1655,9 @@ public String toString() {
       + xmitsInProgress.get() + "}";
   }
 
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
+  private static void printUsage() {
+    System.err.println("Usage: java DataNode");
+    System.err.println(" [-rollback]");
   }
 
   /**
@@ -1742,10 +1742,6 @@ public static void secureMain(String args[], SecureResources resources) {
   }
 
   public static void main(String args[]) {
-    if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     secureMain(args, null);
   }
 
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
@@ -39,8 +38,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -72,9 +69,12 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
 
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -188,22 +188,6 @@ public static enum OperationCategory {
     DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
 
-  private static final String USAGE = "Usage: java NameNode ["
-      + StartupOption.BACKUP.getName() + "] | ["
-      + StartupOption.CHECKPOINT.getName() + "] | ["
-      + StartupOption.FORMAT.getName() + " ["
-      + StartupOption.CLUSTERID.getName() + " cid ] ["
-      + StartupOption.FORCE.getName() + "] ["
-      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
-      + StartupOption.UPGRADE.getName() + "] | ["
-      + StartupOption.ROLLBACK.getName() + "] | ["
-      + StartupOption.FINALIZE.getName() + "] | ["
-      + StartupOption.IMPORT.getName() + "] | ["
-      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
-      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
-      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
-      + " ] ]";
-
   public long getProtocolVersion(String protocol,
                                  long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
@@ -911,8 +895,25 @@ private static boolean finalize(Configuration conf,
     return false;
   }
 
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
+  private static void printUsage() {
+    System.err.println(
+      "Usage: java NameNode [" +
+      StartupOption.BACKUP.getName() + "] | [" +
+      StartupOption.CHECKPOINT.getName() + "] | [" +
+      StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +
+      " cid ] [" + StartupOption.FORCE.getName() + "] [" +
+      StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
+      StartupOption.UPGRADE.getName() + "] | [" +
+      StartupOption.ROLLBACK.getName() + "] | [" +
+      StartupOption.FINALIZE.getName() + "] | [" +
+      StartupOption.IMPORT.getName() + "] | [" +
+      StartupOption.INITIALIZESHAREDEDITS.getName() +
+      " [" + StartupOption.FORCE.getName() + "] [" +
+      StartupOption.NONINTERACTIVE.getName() + "]" +
+      "] | [" +
+      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
+      StartupOption.RECOVER.getName() + " [ " +
+      StartupOption.FORCE.getName() + " ] ]");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -1060,7 +1061,7 @@ public static NameNode createNameNode(String argv[], Configuration conf)
       conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
-      printUsage(System.err);
+      printUsage();
       return null;
     }
     setStartupOption(conf, startOpt);
@@ -1174,10 +1175,6 @@ protected String getNameServiceId(Configuration conf) {
   /**
    */
   public static void main(String argv[]) throws Exception {
-    if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);
@@ -562,9 +562,6 @@ public static void main(String[] argv) throws Exception {
     if (opts == null) {
       LOG.fatal("Failed to parse options");
       terminate(1);
-    } else if (opts.shouldPrintHelp()) {
-      opts.usage();
-      System.exit(0);
     }
 
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -598,7 +595,6 @@ static class CommandLineOpts {
     private final Option geteditsizeOpt;
     private final Option checkpointOpt;
     private final Option formatOpt;
-    private final Option helpOpt;
 
 
     Command cmd;
@@ -609,7 +605,6 @@ enum Command {
 
     private boolean shouldForce;
     private boolean shouldFormat;
-    private boolean shouldPrintHelp;
 
     CommandLineOpts() {
       geteditsizeOpt = new Option("geteditsize",
@@ -617,32 +612,20 @@ enum Command {
       checkpointOpt = OptionBuilder.withArgName("force")
         .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
       formatOpt = new Option("format", "format the local storage during startup");
-      helpOpt = new Option("h", "help", false, "get help information");
 
       options.addOption(geteditsizeOpt);
       options.addOption(checkpointOpt);
       options.addOption(formatOpt);
-      options.addOption(helpOpt);
     }
 
     public boolean shouldFormat() {
       return shouldFormat;
     }
 
-    public boolean shouldPrintHelp() {
-      return shouldPrintHelp;
-    }
-
     public void parse(String ... argv) throws ParseException {
       CommandLineParser parser = new PosixParser();
       CommandLine cmdLine = parser.parse(options, argv);
 
-      if (cmdLine.hasOption(helpOpt.getOpt())
-          || cmdLine.hasOption(helpOpt.getLongOpt())) {
-        shouldPrintHelp = true;
-        return;
-      }
-
       boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
       boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
       if (hasGetEdit && hasCheckpoint) {
@@ -679,13 +662,8 @@ public boolean shouldForceCheckpoint() {
     }
 
     void usage() {
-      String header = "The Secondary NameNode is a helper "
-          + "to the primary NameNode. The Secondary is responsible "
-          + "for supporting periodic checkpoints of the HDFS metadata. "
-          + "The current design allows only one Secondary NameNode "
-          + "per HDFS cluster.";
       HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp("secondarynamenode", header, options, "", false);
+      formatter.printHelp("secondarynamenode", options);
     }
   }
 
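The usage() change above swaps a five-argument HelpFormatter.printHelp call (with a descriptive header) for the two-argument form; both overloads exist in commons-cli 1.x. A runnable side-by-side of the two (the Options content is made up for the demo):

import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class HelpFormatterDemo {
  public static void main(String[] args) {
    Options options = new Options();
    options.addOption(new Option("geteditsize",
        "return the length of the current edit log"));
    options.addOption(new Option("format",
        "format the local storage during startup"));

    HelpFormatter formatter = new HelpFormatter();
    // Two-argument form (what the revert keeps): usage line plus options.
    formatter.printHelp("secondarynamenode", options);
    // Five-argument form (what the revert removes): adds a header and a
    // footer, and 'false' suppresses the auto-generated usage summary.
    formatter.printHelp("secondarynamenode",
        "The Secondary NameNode is a helper to the primary NameNode.",
        options, "", false);
  }
}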
@@ -43,10 +43,6 @@ protected void setErrOut(PrintStream errOut) {
     this.errOut = errOut;
   }
 
-  protected void setOut(PrintStream out) {
-    this.out = out;
-  }
-
   @Override
   public void setConf(Configuration conf) {
     if (conf != null) {
@@ -162,10 +162,6 @@ protected String getScopeInsideParentNode() {
 
   public static void main(String args[])
       throws Exception {
-    if (DFSUtil.parseHelpArgument(args,
-        ZKFailoverController.USAGE, System.out, true)) {
-      System.exit(0);
-    }
 
     GenericOptionsParser parser = new GenericOptionsParser(
         new HdfsConfiguration(), args);
@@ -73,25 +73,6 @@ public class DFSck extends Configured implements Tool {
     HdfsConfiguration.init();
   }
 
-  private static final String USAGE = "Usage: DFSck <path> "
-      + "[-list-corruptfileblocks | "
-      + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]]\n"
-      + "\t<path>\tstart checking from this path\n"
-      + "\t-move\tmove corrupted files to /lost+found\n"
-      + "\t-delete\tdelete corrupted files\n"
-      + "\t-files\tprint out files being checked\n"
-      + "\t-openforwrite\tprint out files opened for write\n"
-      + "\t-list-corruptfileblocks\tprint out list of missing "
-      + "blocks and files they belong to\n"
-      + "\t-blocks\tprint out block report\n"
-      + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n"
-      + "\t\tBy default fsck ignores files opened for write, "
-      + "use -openforwrite to report such files. They are usually "
-      + " tagged CORRUPT or HEALTHY depending on their block "
-      + "allocation status";
-
   private final UserGroupInformation ugi;
   private final PrintStream out;
 
@@ -112,9 +93,25 @@ public DFSck(Configuration conf, PrintStream out) throws IOException {
   /**
    * Print fsck usage information
    */
-  static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-    ToolRunner.printGenericCommandUsage(out);
+  static void printUsage() {
+    System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
+        "[-move | -delete | -openforwrite] " +
+        "[-files [-blocks [-locations | -racks]]]]");
+    System.err.println("\t<path>\tstart checking from this path");
+    System.err.println("\t-move\tmove corrupted files to /lost+found");
+    System.err.println("\t-delete\tdelete corrupted files");
+    System.err.println("\t-files\tprint out files being checked");
+    System.err.println("\t-openforwrite\tprint out files opened for write");
+    System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
+        + "blocks and files they belong to");
+    System.err.println("\t-blocks\tprint out block report");
+    System.err.println("\t-locations\tprint out locations for every block");
+    System.err.println("\t-racks\tprint out network topology for data-node locations");
+    System.err.println("\t\tBy default fsck ignores files opened for write, " +
+        "use -openforwrite to report such files. They are usually " +
+        " tagged CORRUPT or HEALTHY depending on their block " +
+        "allocation status");
+    ToolRunner.printGenericCommandUsage(System.err);
   }
   /**
    * @param args
@@ -122,7 +119,7 @@ static void printUsage(PrintStream out) {
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
-      printUsage(System.err);
+      printUsage();
       return -1;
     }
 
@@ -261,12 +258,12 @@ else if (args[idx].equals("-list-corruptfileblocks")) {
       } else {
         System.err.println("fsck: can only operate on one path at a time '"
             + args[idx] + "'");
-        printUsage(System.err);
+        printUsage();
         return -1;
       }
     } else {
       System.err.println("fsck: Illegal option '" + args[idx] + "'");
-      printUsage(System.err);
+      printUsage();
       return -1;
     }
   }
@@ -307,14 +304,10 @@ public static void main(String[] args) throws Exception {
     // -files option is also used by GenericOptionsParser
     // Make sure that is not the first argument for fsck
     int res = -1;
-    if ((args.length == 0) || ("-files".equals(args[0]))) {
-      printUsage(System.err);
-      ToolRunner.printGenericCommandUsage(System.err);
-    } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      res = 0;
-    } else {
+    if ((args.length == 0 ) || ("-files".equals(args[0])))
+      printUsage();
+    else
       res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
-    }
     System.exit(res);
   }
 }
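DFSck's main() above shows the dispatch this commit unwinds: pre-revert, a lone -h/-help argument was answered before control ever reached ToolRunner. A compact stub of that shape (the Tool below is a toy, not DFSck; ToolRunner, Tool, and Configured are real Hadoop API):

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FsckStyleDemo extends Configured implements Tool {
  private static final String USAGE = "Usage: FsckStyleDemo <path>";

  @Override
  public int run(String[] args) throws Exception {
    System.out.println("checking " + (args.length > 0 ? args[0] : "/"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    int res;
    if (args.length == 1
        && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
      System.out.println(USAGE);  // answer help before ToolRunner runs
      res = 0;
    } else {
      res = ToolRunner.run(new FsckStyleDemo(), args);
    }
    System.exit(res);
  }
}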
@@ -40,6 +40,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -47,7 +48,9 @@
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -68,10 +71,8 @@ public class DelegationTokenFetcher {
   private static final String CANCEL = "cancel";
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
-  private static final String HELP = "help";
-  private static final String HELP_SHORT = "h";
 
-  private static void printUsage(PrintStream err) {
+  private static void printUsage(PrintStream err) throws IOException {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
     err.println("fetchdt <opts> <token file>");
@@ -106,7 +107,6 @@ public static void main(final String[] args) throws Exception {
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
     fetcherOptions.addOption(RENEW, false, "renew the token");
     fetcherOptions.addOption(PRINT, false, "print the token");
-    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
     GenericOptionsParser parser = new GenericOptionsParser(conf,
         fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
@@ -119,14 +119,9 @@ public static void main(final String[] args) throws Exception {
     final boolean cancel = cmd.hasOption(CANCEL);
     final boolean renew = cmd.hasOption(RENEW);
     final boolean print = cmd.hasOption(PRINT);
-    final boolean help = cmd.hasOption(HELP);
     String[] remaining = parser.getRemainingArgs();
 
     // check option validity
-    if (help) {
-      printUsage(System.out);
-      System.exit(0);
-    }
     if (cancel && renew || cancel && print || renew && print || cancel && renew
         && print) {
       System.err.println("ERROR: Only specify cancel, renew or print.");
@@ -324,10 +324,6 @@ public Integer run() throws Exception {
   }
 
   public static void main(String[] args) throws Exception {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
     System.exit(res);
   }
@@ -28,7 +28,6 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -45,8 +44,6 @@ public class GetGroups extends GetGroupsBase {
 
   private static final Log LOG = LogFactory.getLog(GetGroups.class);
 
-  static final String USAGE = "Usage: hdfs groups [username ...]";
-
   static{
     HdfsConfiguration.init();
   }
@@ -89,10 +86,6 @@ protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
   }
 
   public static void main(String[] argv) throws Exception {
-    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
     int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
     System.exit(res);
   }
@@ -55,9 +55,7 @@ public class TestDFSHAAdmin {
 
   private DFSHAAdmin tool;
   private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
-  private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
   private String errOutput;
-  private String output;
   private HAServiceProtocol mockProtocol;
   private ZKFCProtocol mockZkfcProtocol;
 
@@ -113,14 +111,12 @@ protected HAServiceTarget resolveTarget(String nnId) {
     };
     tool.setConf(getHAConf());
     tool.setErrOut(new PrintStream(errOutBytes));
-    tool.setOut(new PrintStream(outBytes));
   }
 
   private void assertOutputContains(String string) {
-    if (!errOutput.contains(string) && !output.contains(string)) {
-      fail("Expected output to contain '" + string +
-          "' but err_output was:\n" + errOutput +
-          "\n and output was: \n" + output);
+    if (!errOutput.contains(string)) {
+      fail("Expected output to contain '" + string + "' but was:\n" +
+          errOutput);
     }
   }
 
@@ -147,7 +143,7 @@ public void testNamenodeResolution() throws Exception {
 
   @Test
   public void testHelp() throws Exception {
-    assertEquals(0, runTool("-help"));
+    assertEquals(-1, runTool("-help"));
     assertEquals(0, runTool("-help", "transitionToActive"));
     assertOutputContains("Transitions the service into Active");
   }
@@ -382,12 +378,10 @@ public void testFencingConfigPerNameNode() throws Exception {
 
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
-    outBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
     int ret = tool.run(args);
     errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    output = new String(outBytes.toByteArray(), Charsets.UTF_8);
-    LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
+    LOG.info("Output:\n" + errOutput);
     return ret;
   }
 
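TestDFSHAAdmin's capture trick, dropped above for the stdout stream but kept for stderr, is plain java.io: hand the tool a PrintStream backed by a ByteArrayOutputStream and assert on what accumulated. A standalone sketch (no JUnit; the names are invented for the demo):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;

public class OutputCaptureDemo {
  public static void main(String[] args) {
    ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
    PrintStream errOut = new PrintStream(errOutBytes);

    // Stand-in for the tool writing to its injected stream.
    errOut.println("Transitions the service into Active state");
    errOut.flush();

    String errOutput =
        new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8);
    if (!errOutput.contains("Active")) {
      throw new AssertionError(
          "Expected output to contain 'Active' but was:\n" + errOutput);
    }
    System.out.println("captured: " + errOutput.trim());
  }
}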