HADOOP-2643 Make migration tool smarter.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@613446 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-01-19 20:20:15 +00:00
parent ad1e1f9ca9
commit 5ea6853f90
6 changed files with 79 additions and 3 deletions

View File: CHANGES.txt

@@ -201,7 +201,8 @@ Trunk (unreleased changes)
HMaster shutdown
HADOOP-2616 hbase not splitting when the total size of region reaches max
region size * 1.5
HADOOP-2643 Make migration tool smarter.
Release 0.15.1
Branch 0.15

View File: HConstants.java

@@ -26,6 +26,14 @@ import org.apache.hadoop.io.Text;
*/
public interface HConstants {
  // For migration

  /** name of version file */
  static final String VERSION_FILE_NAME = "hbase.version";

  /** version of file system */
  static final String FILE_SYSTEM_VERSION = "0.1";

  // Configuration parameters
  // TODO: URL for hbase master like hdfs URLs with host and port.
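
For reference, a minimal sketch of how these two constants combine into the on-disk version file location; the "/hbase" root directory literal is an illustrative assumption (in practice the root comes from the HConstants.HBASE_DIR configuration property, as Migrate does below):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;

public class VersionFileLocation {
  public static void main(String[] args) {
    // Assumed root directory; HMaster and Migrate resolve it from configuration.
    Path rootdir = new Path("/hbase");
    // The version file lives directly under the root, e.g. /hbase/hbase.version,
    // and holds the expected FILE_SYSTEM_VERSION string ("0.1").
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    System.out.println(versionFile + " should contain version "
        + HConstants.FILE_SYSTEM_VERSION);
  }
}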

View File: HMaster.java

@@ -889,6 +889,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    // Make sure the root directory exists!
    if(! fs.exists(rootdir)) {
      fs.mkdirs(rootdir);
      FSUtils.setVersion(fs, rootdir);
    } else if (!FSUtils.checkVersion(fs, rootdir)) {
      throw new IOException(
          "file system not correct version. Run hbase.util.Migrate");
    }

    if (!fs.exists(rootRegionDir)) {

View File: FSUtils.java

@@ -19,12 +19,16 @@
*/
package org.apache.hadoop.hbase.util;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.dfs.DistributedFileSystem;
/**
@@ -71,4 +75,40 @@ public class FSUtils {
    }
    return available;
  }

  /**
   * Verifies that the file system is at the current version by reading the
   * version file under the given root directory.
   *
   * @param fs file system
   * @param rootdir root directory of the HBase instance
   * @return true if the current file system is the correct version
   * @throws IOException
   */
  public static boolean checkVersion(FileSystem fs, Path rootdir) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    boolean versionOk = false;
    if (fs.exists(versionFile)) {
      FSDataInputStream s = fs.open(versionFile);
      String version = DataInputStream.readUTF(s);
      s.close();
      versionOk = version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0;
    }
    return versionOk;
  }

  /**
   * Sets the version of the file system by writing the version file under the
   * given root directory.
   *
   * @param fs file system
   * @param rootdir root directory of the HBase instance
   * @throws IOException
   */
  public static void setVersion(FileSystem fs, Path rootdir) throws IOException {
    FSDataOutputStream s =
      fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
    s.writeUTF(HConstants.FILE_SYSTEM_VERSION);
    s.close();
  }
}
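
As a usage illustration, the sketch below mirrors the startup check HMaster performs with these helpers; the Configuration setup and the "/hbase" root path literal are assumptions, not part of this commit:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class VersionCheckSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);   // file system backing HBase
    Path rootdir = new Path("/hbase");      // assumed HBase root directory

    if (!fs.exists(rootdir)) {
      // Fresh install: create the root and stamp it with the current version.
      fs.mkdirs(rootdir);
      FSUtils.setVersion(fs, rootdir);
    } else if (!FSUtils.checkVersion(fs, rootdir)) {
      // Existing layout at an older version: the migration tool must run first.
      throw new IOException(
          "file system not correct version. Run hbase.util.Migrate");
    }
  }
}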

View File: Migrate.java

@@ -136,7 +136,14 @@ public class Migrate extends Configured implements Tool {
    Path rootdir = fs.makeQualified(new Path( // get path for instance
        conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));

    // check for "extra" files
    // See if there is a file system version file
    if (FSUtils.checkVersion(fs, rootdir)) {
      LOG.info("file system is at current level, no upgrade necessary");
      return 0;
    }

    // check for "extra" files and for old upgradable regions
    extraFiles(fs, rootdir);
@@ -155,10 +162,14 @@
    // scan for left over regions
    extraRegions(fs, rootdir);

    // set file system version
    FSUtils.setVersion(fs, rootdir);

    return 0;
  }

  private void extraFiles(FileSystem fs, Path rootdir) throws IOException {
    FileStatus[] stats = fs.listStatus(rootdir);
    if (stats == null || stats.length == 0) {
@@ -175,6 +186,15 @@
          String message = "unrecognized file " + name;
          extraFile(otherFiles, message, fs, stats[i].getPath());
        }
      } else {
        String regionName = name.substring(OLD_PREFIX.length());
        try {
          Integer.parseInt(regionName);
        } catch (NumberFormatException e) {
          extraFile(otherFiles, "old region format can not be converted: " +
              name, fs, stats[i].getPath());
        }
      }
    }
  }
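
Since Migrate extends Configured and implements Tool, it would normally be launched through Hadoop's ToolRunner; a minimal launcher sketch, assuming a no-argument Migrate constructor and a plain Configuration (neither is shown in this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Migrate;
import org.apache.hadoop.util.ToolRunner;

public class RunMigrate {
  public static void main(String[] args) throws Exception {
    // ToolRunner parses generic Hadoop options, injects the Configuration,
    // and then invokes Migrate.run() with the remaining arguments.
    int exitCode = ToolRunner.run(new Configuration(), new Migrate(), args);
    System.exit(exitCode);
  }
}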

View File: MiniHBaseCluster.java

@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;
import org.apache.hadoop.hbase.util.FSUtils;
/**
* This class creates a single process HBase cluster. One thread is created for
* each server.
@@ -138,6 +140,7 @@ public class MiniHBaseCluster implements HConstants {
    try {
      this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
      fs.mkdirs(parentdir);
      FSUtils.setVersion(fs, parentdir);
      this.hbaseCluster = new LocalHBaseCluster(this.conf, nRegionNodes);
      this.hbaseCluster.startup();
    } catch(IOException e) {