HADOOP-2668 Documentation and improved logging so the fact that hbase now requires migration comes as less of a surprise

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@614413 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-01-23 03:24:32 +00:00
parent 6487762f94
commit af3ca76470
8 changed files with 259 additions and 130 deletions

View File

@@ -143,6 +143,8 @@ Trunk (unreleased changes)
HADOOP-2584 Web UI displays an IOException instead of the Tables
HADOOP-2650 Remove Writables.clone and use WritableUtils.clone from
hadoop instead
HADOOP-2668 Documentation and improved logging so the fact that hbase now
requires migration comes as less of a surprise
IMPROVEMENTS
HADOOP-2401 Add convenience put method that takes writable

View File

@@ -63,6 +63,7 @@ if [ $# = 0 ]; then
echo " regionserver run an Hbase HRegionServer node"
echo " rest run an Hbase REST server"
echo " thrift run an Hbase Thrift server"
echo " migrate upgrade an hbase.rootdir"
echo " or"
echo " CLASSNAME run the class named CLASSNAME"
echo "Most commands print help when invoked w/o parameters."
@@ -219,6 +220,8 @@ elif [ "$COMMAND" = "rest" ] ; then
CLASS='org.apache.hadoop.hbase.rest.Dispatcher'
elif [ "$COMMAND" = "thrift" ] ; then
CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
elif [ "$COMMAND" = "migrate" ] ; then
CLASS='org.apache.hadoop.hbase.util.Migrate'
else
CLASS=$COMMAND
fi
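
The new case maps the migrate command onto org.apache.hadoop.hbase.util.Migrate; because the final else branch runs any class name given on the command line, the two invocations below should be equivalent. A minimal sketch, assuming ${HBASE_HOME} points at the contrib/hbase root described in the overview documentation:

    # the new convenience command added by this patch
    ${HBASE_HOME}/bin/hbase migrate check
    # the same thing, spelled out through the CLASSNAME fallback
    ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.util.Migrate check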

View File

@@ -891,8 +891,8 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
fs.mkdirs(rootdir);
FSUtils.setVersion(fs, rootdir);
} else if (!FSUtils.checkVersion(fs, rootdir)) {
throw new IOException(
"file system not correct version. Run hbase.util.Migrate");
throw new IOException("File system needs upgrade. Run " +
"the '${HBASE_HOME}/bin/hbase migrate' script");
}
if (!fs.exists(rootRegionDir)) {
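
The practical effect of this hunk is a fail-fast with an actionable message: a master started over an out-of-date hbase.rootdir now names the remedy rather than an internal class. A sketch of what an operator would see and do (messages quoted from the code above; the command comes from the Migrate usage text later in this commit):

    # before: "file system not correct version. Run hbase.util.Migrate"
    # after:  "File system needs upgrade. Run the '${HBASE_HOME}/bin/hbase migrate' script"
    # the remedy can now be copied straight out of the exception text:
    ${HBASE_HOME}/bin/hbase migrate check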

View File

@@ -1871,7 +1871,7 @@ public class HRegion implements HConstants {
* @param info HRegionInfo for the region
* @return qualified path of region directory
*/
static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
public static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
return new Path(
HTableDescriptor.getTableDir(rootdir, info.getTableDesc().getName()),
info.getEncodedName()

View File

@@ -2,13 +2,13 @@
<html>
<head />
<body bgcolor="white">
Provides HBase, the <a href="http://lucene.apache.org/hadoop">Hadoop</a>
Provides Hbase, the <a href="http://lucene.apache.org/hadoop">Hadoop</a>
simple database.
<h2><a name="requirements">Requirements</a></h2>
<ul>
<li>Java 1.5.x, preferably from <a href="http://www.java.com/en/download/">Sun</a>.</li>
<li>HBase runs on top of <a href="http://lucene.apache.org/hadoop">Hadoop</a>. Hadoop has its own set of
<li>Hbase runs on top of <a href="http://lucene.apache.org/hadoop">Hadoop</a>. Hadoop has its own set of
<a href="http://lucene.apache.org/hadoop/api/overview-summary.html#overview_description">
requirements and instructions</a>. Make sure to set
<code>JAVA_HOME</code> to the root of your Java installation when configuring Hadoop.
@@ -17,13 +17,19 @@ requirements and instructions</a>. Make sure to set
<h2><a name="getting_started" >Getting Started</a></h2>
<p>
What follows presumes you are installing hbase for the first time. If upgrading your
hbase instance, see <a href="#upgrading">Upgrading</a>.
</p>
<p>
Start by defining the following directory variables for your convenience:
</p>
<p>
<ul>
<li><code>${HADOOP_HOME}</code>: The root directory of your Hadoop installation.</li>
<li><code>${HBASE_HOME}</code>: The HBase root, located at
<li><code>${HBASE_HOME}</code>: The Hbase root, located at
<code>${HADOOP_HOME}/src/contrib/hbase</code>.</li>
</ul>
</p>
<p>
If you are running a standalone operation, proceed to <a href="#runandconfirm">Running
and Confirming Your Installation</a>. If you are running a distributed operation, continue below.
@@ -34,7 +40,7 @@ and Confirming Your Installation</a>. If you are running a distributed operatio
Make sure you have followed
<a href="http://lucene.apache.org/hadoop/api/overview-summary.html#overview_description">
Hadoop's instructions</a> for running a distributed operation.
Configuring HBase for a distributed operation requires modification of the following two
Configuring Hbase for a distributed operation requires modification of the following two
files: <code>${HBASE_HOME}/conf/hbase-site.xml</code> and
<code>${HBASE_HOME}/conf/regionservers</code>.
</p>
@@ -44,7 +50,7 @@ files: <code>${HBASE_HOME}/conf/hbase-site.xml</code> and
should never be modified). At a minimum the <code>hbase.master</code> property should be redefined
in <code>hbase-site.xml</code> to define the <code>host:port</code> pair on which to run the
HMaster (<a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">read about the
HBase master, regionservers, etc</a>):
Hbase master, regionservers, etc</a>):
</p>
<pre>
&lt;configuration&gt;
@@ -52,7 +58,7 @@ HBase master, regionservers, etc</a>):
&lt;property&gt;
&lt;name&gt;hbase.master&lt;/name&gt;
&lt;value&gt;[YOUR_HOST]:[PORT]&lt;/value&gt;
&lt;description&gt;The host and port that the HBase master runs at.
&lt;description&gt;The host and port that the Hbase master runs at.
&lt;/description&gt;
&lt;/property&gt;
@@ -65,8 +71,8 @@ host per line (This file is synonymous to the slaves file at
</p>
<h3><a name="additional" >Additional Notes on Distributed Operation</a></h3>
<ul>
<li>Hadoop and HBase must be set up on each host you plan to use.</li>
<li>Additional (optional) HBase-specific variables such as HBASE_HEAPSIZE and HBASE_CLASSPATH
<li>Hadoop and Hbase must be set up on each host you plan to use.</li>
<li>Additional (optional) Hbase-specific variables such as HBASE_HEAPSIZE and HBASE_CLASSPATH
can be set in <code>${HBASE_HOME}/conf/hbase-env.sh</code>.</li>
</ul>
@@ -74,42 +80,49 @@ can be set in <code>${HBASE_HOME}/conf/hbase-env.sh</code>.</li>
<p>If you are running in standalone, non-distributed mode, hbase by default uses
the local filesystem.</p>
<p>If you are running a distributed cluster you will need to start the Hadoop DFS daemons
before starting HBase and stop the daemons after HBase has shut down. Start and
before starting Hbase and stop the daemons after Hbase has shut down. Start and
stop the Hadoop DFS daemons as per the Hadoop
<a href="http://lucene.apache.org/hadoop/api/overview-summary.html">instructions</a>. HBase
<a href="http://lucene.apache.org/hadoop/api/overview-summary.html">instructions</a>. Hbase
does not normally use the mapreduce daemons. These do not need to be started.</p>
<p>Start HBase with the following command:
<p>Start Hbase with the following command:
</p>
<pre>
${HBASE_HOME}/bin/start-hbase.sh
</pre>
<p>
Once HBase has started, enter <code>${HBASE_HOME}/bin/hbase shell</code> to obtain a
shell against HBase from which you can execute HBase commands. In the HBase shell, type
<code>help;</code> to see a list of supported commands. Note that all commands in the HBase
Once Hbase has started, enter <code>${HBASE_HOME}/bin/hbase shell</code> to obtain a
shell against Hbase from which you can execute Hbase commands. In the Hbase shell, type
<code>help;</code> to see a list of supported commands. Note that all commands in the Hbase
shell must end with <code>;</code>. Test your installation by creating, viewing, and dropping
a table, as per the help instructions. Be patient with the <code>create</code> and
<code>drop</code> operations as they may each take 10 seconds or more. To stop hbase, exit the
HBase shell and enter:
Hbase shell and enter:
</p>
<pre>
${HBASE_HOME}/bin/stop-hbase.sh
</pre>
<p>
If you are running a distributed operation, be sure to wait until HBase has shut down completely
If you are running a distributed operation, be sure to wait until Hbase has shut down completely
before stopping the Hadoop daemons.
</p>
<p>
The default location for logs is <code>${HADOOP_HOME}/logs</code>.
</p>
<p>HBase also puts up a UI listing vital attributes. By default it's deployed on the master host
<p>Hbase also puts up a UI listing vital attributes. By default it's deployed on the master host
at port 60010.</p>
<h2><a name="upgrading" >Upgrading</a></h2>
<p>After installing the new hbase, and before starting your cluster, run the
<code>${HBASE_HOME}/bin/hbase migrate</code> migration script. It will make any
adjustments to the filesystem data under <code>hbase.rootdir</code> that are needed to run
the new hbase version.
</p>
<h2><a name="related" >Related Documentation</a></h2>
<ul>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">HBase Home Page</a>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">HBase Architecture</a>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">Hbase Home Page</a>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase Architecture</a>
</ul>
</body>
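
Taken together, the documented upgrade path reduces to the sketch below. The check and upgrade verbs come from the Migrate usage text later in this commit; the cluster must be off-line while the script runs, and ${HBASE_HOME} is as defined above:

    ${HBASE_HOME}/bin/stop-hbase.sh          # hbase must be down during migration
    ${HBASE_HOME}/bin/hbase migrate check    # read-only: report whether migration is needed
    ${HBASE_HOME}/bin/hbase migrate upgrade  # rewrite the layout under hbase.rootdir
    ${HBASE_HOME}/bin/start-hbase.sh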

View File

@@ -65,7 +65,7 @@ public class FSUtils {
try {
if (!available) {
LOG.info("Failed file system available test. Thread: " +
LOG.fatal("File system is not available.. Thread: " +
Thread.currentThread().getName() + ": " + exception);
fs.close();
}

View File

@@ -21,6 +21,7 @@
package org.apache.hadoop.hbase.util;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.InputStreamReader;
import java.io.IOException;
@@ -32,13 +33,9 @@ import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -76,8 +73,9 @@ public class Migrate extends Configured implements Tool {
private final HBaseConfiguration conf;
/** Action to take when an extra file is found */
private static enum EXTRA_FILES {
/** Action to take when an extra file or unrecovered log file is found */
private static String ACTIONS = "abort|ignore|delete|prompt";
private static enum ACTION {
/** Stop conversion */
ABORT,
/** print a warning message, but otherwise ignore */
@@ -88,18 +86,21 @@ public class Migrate extends Configured implements Tool {
PROMPT
}
private static final Map<String, EXTRA_FILES> options =
new HashMap<String, EXTRA_FILES>();
private static final Map<String, ACTION> options =
new HashMap<String, ACTION>();
static {
options.put("abort", EXTRA_FILES.ABORT);
options.put("ignore", EXTRA_FILES.IGNORE);
options.put("delete", EXTRA_FILES.DELETE);
options.put("prompt", EXTRA_FILES.PROMPT);
options.put("abort", ACTION.ABORT);
options.put("ignore", ACTION.IGNORE);
options.put("delete", ACTION.DELETE);
options.put("prompt", ACTION.PROMPT);
}
private EXTRA_FILES logFiles = EXTRA_FILES.ABORT;
private EXTRA_FILES otherFiles = EXTRA_FILES.IGNORE;
private boolean readOnly = false;
private boolean migrationNeeded = false;
private boolean newRootRegion = false;
private ACTION logFiles = ACTION.IGNORE;
private ACTION otherFiles = ACTION.IGNORE;
private BufferedReader reader = null;
@@ -120,56 +121,101 @@ public class Migrate extends Configured implements Tool {
}
/** {@inheritDoc} */
public int run(String[] args) throws Exception {
parseArgs(args);
public int run(String[] args) {
if (parseArgs(args) != 0) {
return -1;
}
try {
HBaseAdmin admin = new HBaseAdmin(conf);
if (admin.isMasterRunning()) {
throw new IllegalStateException(
"HBase cluster must be off-line while being upgraded");
FileSystem fs = FileSystem.get(conf); // get DFS handle
LOG.info("Verifying that file system is available...");
if (!FSUtils.isFileSystemAvailable(fs)) {
throw new IOException(
"Filesystem must be available for upgrade to run.");
}
} catch (MasterNotRunningException e) {
// ignore
}
FileSystem fs = FileSystem.get(conf); // get DFS handle
Path rootdir = fs.makeQualified(new Path( // get path for instance
conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
// See if there is a file system version file
if (FSUtils.checkVersion(fs, rootdir)) {
LOG.info("file system is at current level, no upgrade necessary");
LOG.info("Verifying that HBase is not running...");
try {
HBaseAdmin admin = new HBaseAdmin(conf);
if (admin.isMasterRunning()) {
throw new IllegalStateException(
"HBase cluster must be off-line during upgrade.");
}
} catch (MasterNotRunningException e) {
// ignore
}
LOG.info("Starting upgrade" + (readOnly ? " check" : ""));
Path rootdir = fs.makeQualified(new Path( // get HBase root dir
conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
if (!fs.exists(rootdir)) {
throw new FileNotFoundException("HBase root directory " +
rootdir.toString() + " does not exist.");
}
// See if there is a file system version file
if (FSUtils.checkVersion(fs, rootdir)) {
LOG.info("No upgrade necessary.");
return 0;
}
// check to see if new root region dir exists
checkNewRootRegionDirExists(fs, rootdir);
// check for "extra" files and for old upgradable regions
extraFiles(fs, rootdir);
if (!newRootRegion) {
// find root region
Path rootRegion = new Path(rootdir,
OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
if (!fs.exists(rootRegion)) {
throw new IOException("Cannot find root region " +
rootRegion.toString());
} else if (readOnly) {
migrationNeeded = true;
} else {
migrateRegionDir(fs, rootdir, HConstants.ROOT_TABLE_NAME, rootRegion);
scanRootRegion(fs, rootdir);
// scan for left over regions
extraRegions(fs, rootdir);
}
}
if (!readOnly) {
// set file system version
LOG.info("Setting file system version.");
FSUtils.setVersion(fs, rootdir);
LOG.info("Upgrade successful.");
} else if (migrationNeeded) {
LOG.info("Upgrade needed.");
}
return 0;
} catch (Exception e) {
LOG.fatal("Upgrade" + (readOnly ? " check" : "") + " failed", e);
return -1;
}
// check for "extra" files and for old upgradable regions
}
extraFiles(fs, rootdir);
// find root region
Path rootRegion = new Path(rootdir,
OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
if (!fs.exists(rootRegion)) {
throw new IOException("cannot find root region " + rootRegion.toString());
}
processRegionDir(fs, rootdir, HConstants.ROOT_TABLE_NAME, rootRegion);
scanRootRegion(fs, rootdir);
// scan for left over regions
extraRegions(fs, rootdir);
// set file system version
FSUtils.setVersion(fs, rootdir);
return 0;
private void checkNewRootRegionDirExists(FileSystem fs, Path rootdir)
throws IOException {
Path rootRegionDir =
HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
newRootRegion = fs.exists(rootRegionDir);
migrationNeeded = !newRootRegion;
}
// Check for files that should not be there or should be migrated
private void extraFiles(FileSystem fs, Path rootdir) throws IOException {
FileStatus[] stats = fs.listStatus(rootdir);
if (stats == null || stats.length == 0) {
@@ -178,44 +224,52 @@ }
}
for (int i = 0; i < stats.length; i++) {
String name = stats[i].getPath().getName();
if (!name.startsWith(OLD_PREFIX)) {
if (name.startsWith("log_")) {
String message = "unrecovered region server log file " + name;
extraFile(logFiles, message, fs, stats[i].getPath());
if (name.startsWith(OLD_PREFIX)) {
if (!newRootRegion) {
// We need to migrate if the new root region directory doesn't exist
migrationNeeded = true;
String regionName = name.substring(OLD_PREFIX.length());
try {
Integer.parseInt(regionName);
} catch (NumberFormatException e) {
extraFile(otherFiles, "Old region format can not be upgraded: " +
name, fs, stats[i].getPath());
}
} else {
String message = "unrecognized file " + name;
extraFile(otherFiles, message, fs, stats[i].getPath());
// Since the new root region directory exists, we assume that this
// directory is not necessary
extraFile(otherFiles, "Old region directory found: " + name, fs,
stats[i].getPath());
}
} else {
String regionName = name.substring(OLD_PREFIX.length());
try {
Integer.parseInt(regionName);
} catch (NumberFormatException e) {
extraFile(otherFiles, "old region format can not be converted: " +
name, fs, stats[i].getPath());
// File name does not start with "hregion_"
if (name.startsWith("log_")) {
String message = "Unrecovered region server log file " + name +
" this file can be recovered by the master when it starts.";
extraFile(logFiles, message, fs, stats[i].getPath());
} else if (!newRootRegion) {
// new root region directory does not exist. This is an extra file
String message = "Unrecognized file " + name;
extraFile(otherFiles, message, fs, stats[i].getPath());
}
}
}
}
private void extraFile(EXTRA_FILES action, String message, FileSystem fs,
private void extraFile(ACTION action, String message, FileSystem fs,
Path p) throws IOException {
if (action == EXTRA_FILES.ABORT) {
if (action == ACTION.ABORT) {
throw new IOException(message + " aborting");
} else if (action == EXTRA_FILES.IGNORE) {
} else if (action == ACTION.IGNORE) {
LOG.info(message + " ignoring");
} else if (action == EXTRA_FILES.DELETE) {
} else if (action == ACTION.DELETE) {
LOG.info(message + " deleting");
fs.delete(p);
} else {
// logFiles == EXTRA_FILES.PROMPT
// ACTION.PROMPT
String response = prompt(message + " delete? [y/n]");
if (response.startsWith("Y") || response.startsWith("y")) {
LOG.info(message + " deleting");
fs.delete(p);
@@ -223,7 +277,7 @@ }
}
}
private void processRegionDir(FileSystem fs, Path rootdir, Text tableName,
private void migrateRegionDir(FileSystem fs, Path rootdir, Text tableName,
Path oldPath) throws IOException {
// Create directory where table will live
@@ -300,7 +354,7 @@ // First move the meta region to where it should be and rename
// First move the meta region to where it should be and rename
// subdirectories as necessary
processRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
migrateRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
new Path(rootdir, OLD_PREFIX + info.getEncodedName()));
// Now scan and process the meta table
@@ -348,7 +402,7 @@ // Move the region to where it should be and rename
// Move the region to where it should be and rename
// subdirectories as necessary
processRegionDir(fs, rootdir, region.getTableDesc().getName(),
migrateRegionDir(fs, rootdir, region.getTableDesc().getName(),
new Path(rootdir, OLD_PREFIX + region.getEncodedName()));
results.clear();
@@ -376,11 +430,11 @@ String message;
String message;
if (references.contains(encodedName)) {
message =
"region not in meta table but other regions reference it " + name;
"Region not in meta table but other regions reference it " + name;
} else {
message =
"region not in meta table and no other regions reference it " + name;
"Region not in meta table and no other regions reference it " + name;
}
extraFile(otherFiles, message, fs, stats[i].getPath());
}
@@ -388,15 +442,15 @@ }
}
@SuppressWarnings("static-access")
private void parseArgs(String[] args) {
private int parseArgs(String[] args) {
Options opts = new Options();
Option logFiles = OptionBuilder.withArgName("abort|ignore|delete|prompt")
Option logFiles = OptionBuilder.withArgName(ACTIONS)
.hasArg()
.withDescription(
"disposition of unrecovered region server logs: {abort|ignore|delete|prompt}")
.create("logfiles");
Option extraFiles = OptionBuilder.withArgName("abort|ignore|delete|prompt")
Option extraFiles = OptionBuilder.withArgName(ACTIONS)
.hasArg()
.withDescription("disposition of 'extra' files: {abort|ignore|delete|prompt}")
.create("extrafiles");
@@ -404,21 +458,62 @@ opts.addOption(logFiles);
opts.addOption(logFiles);
opts.addOption(extraFiles);
CommandLineParser parser = new GnuParser();
try {
CommandLine commandLine = parser.parse(opts, args, true);
if (commandLine.hasOption("log-files")) {
this.logFiles = options.get(commandLine.getOptionValue("log-files"));
}
if (commandLine.hasOption("extra-files")) {
this.otherFiles = options.get(commandLine.getOptionValue("extra-files"));
}
} catch (ParseException e) {
LOG.error("options parsing failed", e);
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("options are: ", opts);
GenericOptionsParser parser =
new GenericOptionsParser(this.getConf(), opts, args);
String[] remainingArgs = parser.getRemainingArgs();
if (remainingArgs.length != 1) {
usage();
return -1;
}
if (remainingArgs[0].compareTo("check") == 0) {
this.readOnly = true;
} else if (remainingArgs[0].compareTo("upgrade") != 0) {
usage();
return -1;
}
if (readOnly) {
this.logFiles = ACTION.IGNORE;
this.otherFiles = ACTION.IGNORE;
} else {
CommandLine commandLine = parser.getCommandLine();
ACTION action = null;
if (commandLine.hasOption("logfiles")) {
action = options.get(commandLine.getOptionValue("logfiles"));
if (action == null) {
usage();
return -1;
}
this.logFiles = action;
}
if (commandLine.hasOption("extrafiles")) {
action = options.get(commandLine.getOptionValue("extrafiles"));
if (action == null) {
usage();
return -1;
}
this.otherFiles = action;
}
}
return 0;
}
private void usage() {
System.err.println("Usage: bin/hbase migrate { check | upgrade } [options]\n");
System.err.println(" check perform upgrade checks only.");
System.err.println(" upgrade perform upgrade checks and modify hbase.\n");
System.err.println(" Options are:");
System.err.println(" -logfiles={abort|ignore|delete|prompt}");
System.err.println(" action to take when unrecovered region");
System.err.println(" server log files are found.\n");
System.err.println(" -extrafiles={abort|ignore|delete|prompt}");
System.err.println(" action to take if \"extra\" files are found.\n");
System.err.println(" -conf <configuration file> specify an application configuration file");
System.err.println(" -D <property=value> use value for given property");
System.err.println(" -fs <local|namenode:port> specify a namenode");
}
private synchronized String prompt(String prompt) {
@@ -441,13 +536,9 @@ * @param args command line arguments
* @param args command line arguments
*/
public static void main(String[] args) {
Tool t = new Migrate();
GenericOptionsParser hadoopOpts =
new GenericOptionsParser(t.getConf(), args);
int status = 0;
try {
status = ToolRunner.run(t, hadoopOpts.getRemainingArgs());
status = ToolRunner.run(new Migrate(), args);
} catch (Exception e) {
LOG.error("exiting due to error", e);
status = -1;
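
For reference, a hedged sketch of how the dispositions and the Hadoop generic options combine on the command line. Flag names and ordering follow the usage() text above; the namenode address is a placeholder, not something this commit configures:

    # delete unrecovered region server log files without prompting; abort on anything unrecognized
    ${HBASE_HOME}/bin/hbase migrate upgrade -logfiles=delete -extrafiles=abort
    # read-only check against an explicit namenode via the generic -fs option
    ${HBASE_HOME}/bin/hbase migrate check -fs namenode.example.org:9000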

View File

@@ -111,10 +111,30 @@ public class TestMigrate extends HBaseTestCase {
listPaths(dfs, root, root.toString().length() + 1);
Migrate u = new Migrate(conf);
u.run((String[]) null);
u.run(new String[] {"check"});
listPaths(dfs, root, root.toString().length() + 1);
u = new Migrate(conf);
u.run(new String[] {"upgrade"});
listPaths(dfs, root, root.toString().length() + 1);
// Remove version file and try again
dfs.delete(new Path(root, HConstants.VERSION_FILE_NAME));
u = new Migrate(conf);
u.run(new String[] {"upgrade"});
listPaths(dfs, root, root.toString().length() + 1);
// Try again. No upgrade should be necessary
u = new Migrate(conf);
u.run(new String[] {"check"});
u = new Migrate(conf);
u.run(new String[] {"upgrade"});
} catch (Exception e) {
e.printStackTrace();
} finally {