HBASE-21536 Fix completebulkload usage instructions
Signed-off-by: Peter Somogyi <psomogyi@apache.org>
commit 420fbba6ae
parent 0e79e70545
bin/hbase
@@ -91,35 +91,36 @@ if [ $# = 0 ]; then
   echo ""
   echo "Commands:"
   echo "Some commands take arguments. Pass no args or -h for usage."
   echo "  shell            Run the HBase shell"
   echo "  hbck             Run the HBase 'fsck' tool. Defaults read-only hbck1."
   echo "                   Pass '-j /path/to/HBCK2.jar' to run hbase-2.x HBCK2."
   echo "  snapshot         Tool for managing snapshots"
   if [ "${in_omnibus_tarball}" = "true" ]; then
     echo "  wal              Write-ahead-log analyzer"
     echo "  hfile            Store file analyzer"
     echo "  zkcli            Run the ZooKeeper shell"
     echo "  master           Run an HBase HMaster node"
     echo "  regionserver     Run an HBase HRegionServer node"
     echo "  zookeeper        Run a ZooKeeper server"
     echo "  rest             Run an HBase REST server"
     echo "  thrift           Run the HBase Thrift server"
     echo "  thrift2          Run the HBase Thrift2 server"
     echo "  clean            Run the HBase clean up script"
   fi
   echo "  classpath        Dump hbase CLASSPATH"
   echo "  mapredcp         Dump CLASSPATH entries required by mapreduce"
   echo "  pe               Run PerformanceEvaluation"
   echo "  ltt              Run LoadTestTool"
   echo "  canary           Run the Canary tool"
   echo "  version          Print the version"
   echo "  backup           Backup tables for recovery"
   echo "  restore          Restore tables from existing backup image"
+  echo "  completebulkload Run BulkLoadHFiles tool"
   echo "  regionsplitter   Run RegionSplitter tool"
   echo "  rowcounter       Run RowCounter tool"
   echo "  cellcounter      Run CellCounter tool"
   echo "  pre-upgrade      Run Pre-Upgrade validator tool"
   echo "  CLASSNAME        Run the class named CLASSNAME"
   exit 1
 fi
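With the listing updated, a bare bin/hbase (no arguments) now advertises the new command. A quick sanity check, sketched under the assumption that you are running from an installed tarball:

    $ bin/hbase | grep completebulkload
      completebulkload Run BulkLoadHFiles tool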
|
@@ -645,6 +646,8 @@ elif [ "$COMMAND" = "cellcounter" ] ; then
   CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
 elif [ "$COMMAND" = "pre-upgrade" ] ; then
   CLASS='org.apache.hadoop.hbase.tool.PreUpgradeValidator'
+elif [ "$COMMAND" = "completebulkload" ] ; then
+  CLASS='org.apache.hadoop.hbase.tool.BulkLoadHFilesTool'
 else
   CLASS=$COMMAND
 fi
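The added dispatch branch resolves the completebulkload command to org.apache.hadoop.hbase.tool.BulkLoadHFilesTool, so the fully qualified class name is no longer required on the command line. A minimal sketch of the two now-equivalent invocations (the HDFS path and table name are placeholders):

    $ bin/hbase completebulkload hdfs://namenode:8020/user/hbase/hfile-output mytable
    $ bin/hbase org.apache.hadoop.hbase.tool.BulkLoadHFilesTool hdfs://namenode:8020/user/hbase/hfile-output mytable

The usage() rewrite below brings the tool's own help text in line with this entry point.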
|
@@ -182,13 +182,16 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   }

   private void usage() {
-    System.err.println("usage: " + NAME + " /path/to/hfileoutputformat-output tablename -loadTable"
-      + "\n -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by "
-      + "this tool\n Note: if you set this to 'no', then the target table must already exist "
-      + "in HBase\n -loadTable implies your baseDirectory to store file has a depth of 3 ,you"
-      + " must have an existing table\n-D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes - can be used "
-      + "to ignore unmatched column families\n" +
-      "\n");
+    System.err.println("usage: " + "bin/hbase completebulkload <-Dargs> "
+      + "</path/to/hfileoutputformat-output> <tablename>\n"
+      + "\t-D" + CREATE_TABLE_CONF_KEY + "=no can be used to avoid creation "
+      + "of a table by this tool.\n"
+      + "\t Note: if you set this to 'no', then target table must already exist.\n"
+      + "\t-D" + IGNORE_UNMATCHED_CF_CONF_KEY + "=yes can be used to ignore "
+      + "unmatched column families.\n"
+      + "\t-loadTable switch implies your baseDirectory to store file has a "
+      + "depth of 3, table must exist\n"
+      + "\t and -loadTable switch is the last option on the command line.\n\n");
   }

   /**
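Rendered, the rewritten usage() prints roughly the following when the tool is run without arguments. This is a reconstruction that assumes CREATE_TABLE_CONF_KEY and IGNORE_UNMATCHED_CF_CONF_KEY resolve to "create.table" and "ignore.unmatched.families", their usual definitions in this class:

    $ bin/hbase completebulkload
    usage: bin/hbase completebulkload <-Dargs> </path/to/hfileoutputformat-output> <tablename>
        -Dcreate.table=no can be used to avoid creation of a table by this tool.
         Note: if you set this to 'no', then target table must already exist.
        -Dignore.unmatched.families=yes can be used to ignore unmatched column families.
        -loadTable switch implies your baseDirectory to store file has a depth of 3, table must exist
         and -loadTable switch is the last option on the command line.

The reference guide gets the matching jar-name fix below.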
|
@@ -841,7 +841,7 @@ $ bin/hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles <hdfs://storefile

 .Driver
 ----
-HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar completebulkload <hdfs://storefileoutput> <tablename>
+HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-mapreduce-VERSION.jar completebulkload <hdfs://storefileoutput> <tablename>
 ----

 [[completebulkload.warning]]
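The jar swap matters because in hbase-2.x the MapReduce Driver that registers the completebulkload program ships in the hbase-mapreduce artifact, not hbase-server. A hedged way to verify on a built distribution (jar location and name vary by version; a ProgramDriver-based jar run with no program name lists its valid program names):

    $ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` \
        ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-mapreduce-VERSION.jar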