HDFS-12067. Correct dfsadmin commands usage message to reflect IPC port. Contributed by steven-wugang.

Brahma Reddy Battula 2017-07-19 23:21:43 +08:00
parent df180259b0
commit f8cd55fe33
1 changed file with 33 additions and 22 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -1113,29 +1113,39 @@ public class DFSAdmin extends FsShell {
       "\tor gets a list of reconfigurable properties.\n" +
       "\tThe second parameter specifies the node type\n";
 
-    String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
-      "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
-      "\ton <hostname:port>. All other args after are sent to the host.\n";
+    String genericRefresh = "-refresh: Arguments are <hostname:ipc_port>" +
+        " <resource_identifier> [arg1..argn]\n" +
+        "\tTriggers a runtime-refresh of the resource specified by " +
+        "<resource_identifier> on <hostname:ipc_port>.\n" +
+        "\tAll other args after are sent to the host.\n" +
+        "\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+        "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
       "\t\tnodes as reported by the Namenode\n";
 
-    String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as argument,\n"+
-      "\t\tFor the given datanode, reloads the configuration files,\n" +
-      "\t\tstops serving the removed block-pools\n"+
-      "\t\tand starts serving new block-pools\n";
+    String refreshNamenodes = "-refreshNamenodes: Takes a " +
+        "datanodehost:ipc_port as argument,For the given datanode\n" +
+        "\t\treloads the configuration files,stops serving the removed\n" +
+        "\t\tblock-pools and starts serving new block-pools.\n" +
+        "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+        "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
-    String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
-        + "argument,\n\t\tFor the given datanode, get the volume report\n";
+    String getVolumeReport = "-getVolumeReport: Takes a datanodehost:ipc_port"+
+        " as argument,For the given datanode,get the volume report.\n" +
+        "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+        "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
-    String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
-      "\t\t and an optional argument \"force\". If force is passed,\n"+
-      "\t\t block pool directory for the given blockpool id on the given\n"+
-      "\t\t datanode is deleted along with its contents, otherwise\n"+
-      "\t\t the directory is deleted only if it is empty. The command\n" +
-      "\t\t will fail if datanode is still serving the block pool.\n" +
-      "\t\t Refer to refreshNamenodes to shutdown a block pool\n" +
-      "\t\t service on a datanode.\n";
+    String deleteBlockPool = "-deleteBlockPool: Arguments are " +
+        "datanodehost:ipc_port, blockpool id and an optional argument\n" +
+        "\t\t\"force\". If force is passed,block pool directory for\n" +
+        "\t\tthe given blockpool id on the given datanode is deleted\n" +
+        "\t\talong with its contents,otherwise the directory is deleted\n"+
+        "\t\tonly if it is empty.The command will fail if datanode is\n" +
+        "\t\tstill serving the block pool.Refer to refreshNamenodes to\n" +
+        "\t\tshutdown a block pool service on a datanode.\n" +
+        "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+        "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
     String setBalancerBandwidth = "-setBalancerBandwidth <bandwidth>:\n" +
       "\tChanges the network bandwidth used by each datanode during\n" +
@@ -1893,23 +1903,24 @@ public class DFSAdmin extends FsShell {
           + " [-refreshCallQueue]");
     } else if ("-reconfig".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-reconfig <namenode|datanode> <host:port> "
+          + " [-reconfig <namenode|datanode> <host:ipc_port> "
           + "<start|status|properties>]");
     } else if ("-refresh".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
+          + " [-refresh <hostname:ipc_port> "
+          + "<resource_identifier> [arg1..argn]");
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-printTopology]");
     } else if ("-refreshNamenodes".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-refreshNamenodes datanode-host:port]");
+          + " [-refreshNamenodes datanode-host:ipc_port]");
     } else if ("-getVolumeReport".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-getVolumeReport datanode-host:port]");
+          + " [-getVolumeReport datanode-host:ipc_port]");
     } else if ("-deleteBlockPool".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
+          + " [-deleteBlockPool datanode-host:ipc_port blockpoolId [force]]");
     } else if ("-setBalancerBandwidth".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-setBalancerBandwidth <bandwidth in bytes per second>]");