HBASE-1215 part2; check that all has been major compacted before starting migration
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@794471 13f79535-47bb-0310-9956-ffa450edef68
commit 525d1f985d
parent 797e2316bc
FSUtils.java

@@ -268,7 +268,6 @@ public class FSUtils {
     return fs.exists(rootRegionDir);
   }
 
-
   /**
    * Runs through the hbase rootdir and checks all stores have only
    * one file in them -- that is, they've been major compacted. Looks
@@ -311,6 +310,67 @@ public class FSUtils {
     return true;
   }
 
+  /**
+   * Expects to find -ROOT- directory.
+   * @param fs
+   * @param hbaseRootDir
+   * @return True if this is a pre020 layout.
+   * @throws IOException
+   */
+  public static boolean isPre020FileLayout(final FileSystem fs,
+    final Path hbaseRootDir)
+  throws IOException {
+    Path mapfiles = new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
+      "70236052"), "mapfiles");
+    return fs.exists(mapfiles);
+  }
+
+  /**
+   * Runs through the hbase rootdir and checks all stores have only
+   * one file in them -- that is, they've been major compacted. Looks
+   * at root and meta tables too. This version differs from
+   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
+   * pre-0.20.0 hbase layout on the filesystem. Used when migrating.
+   * @param fs
+   * @param hbaseRootDir
+   * @return True if this hbase install is major compacted.
+   * @throws IOException
+   */
+  public static boolean isMajorCompactedPre020(final FileSystem fs,
+    final Path hbaseRootDir)
+  throws IOException {
+    // Presumes any directory under hbase.rootdir is a table.
+    FileStatus [] directories = fs.listStatus(hbaseRootDir, new DirFilter(fs));
+    for (int i = 0; i < directories.length; i++) {
+      // Inside a table, there are compaction.dir directories to skip.
+      // Otherwise, all else should be regions. Then in each region, should
+      // only be family directories. Under each of these, should be a mapfile
+      // and info directory and in these only one file.
+      Path d = directories[i].getPath();
+      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
+      FileStatus [] tablesubdirectories = fs.listStatus(d, new DirFilter(fs));
+      for (int j = 0; j < tablesubdirectories.length; j++) {
+        Path dd = tablesubdirectories[j].getPath();
+        // Compare directory names; Path.equals against a String never matches.
+        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
+        // Else its a region name. Now look in region for families.
+        FileStatus [] familydirectories = fs.listStatus(dd, new DirFilter(fs));
+        for (int k = 0; k < familydirectories.length; k++) {
+          Path family = familydirectories[k].getPath();
+          // Now in family, there are 'mapfile' and 'info' subdirs. Just
+          // look in the 'mapfile' subdir.
+          FileStatus [] familyStatus =
+            fs.listStatus(new Path(family, "mapfiles"));
+          if (familyStatus.length > 1) {
+            LOG.debug(family.toString() + " has " + familyStatus.length +
+              " files.");
+            return false;
+          }
+        }
+      }
+    }
+    return true;
+  }
+
   /**
    * A {@link PathFilter} that returns directories.
    */
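Both new checks lean on FSUtils.DirFilter, whose body falls outside these hunks; only its javadoc shows in the trailing context above. A minimal sketch of such a directory-only PathFilter, modeled on the anonymous filter removed from Migrate.java further down -- the exact committed body is an assumption:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

/** Sketch: accepts only paths that resolve to directories. */
public class DirFilter implements PathFilter {
  private final FileSystem fs;

  public DirFilter(final FileSystem fs) {
    this.fs = fs;
  }

  public boolean accept(Path p) {
    boolean isdir = false;
    try {
      // Stat the path; anything that is not a directory is filtered out.
      isdir = this.fs.getFileStatus(p).isDir();
    } catch (IOException e) {
      e.printStackTrace();
    }
    return isdir;
  }
}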
Migrate.java

@@ -26,10 +26,8 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -37,7 +35,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -217,17 +214,20 @@ public class Migrate extends Configured implements Tool {
       return;
     }
-    // Before we start, make sure all is major compacted.
-    if (!isMajorCompacted(fs, conf)) {
-      String msg = "All tables must be major compacted before the migration can begin." +
-        MIGRATION_LINK;
-      System.out.println(msg);
-      throw new IOException(msg);
+    Path hbaseRootDir = new Path(conf.get(HConstants.HBASE_DIR));
+    boolean pre020 = FSUtils.isPre020FileLayout(fs, hbaseRootDir);
+    if (pre020) {
+      if (!FSUtils.isMajorCompactedPre020(fs, hbaseRootDir)) {
+        String msg = "All tables must be major compacted before migration." +
+          MIGRATION_LINK;
+        System.out.println(msg);
+        throw new IOException(msg);
+      }
+      // TODO: Rewrite regions.
     }
     // TODO: Verify all has been brought over from old to new layout.
     final MetaUtils utils = new MetaUtils(this.conf);
     try {
       // Preparation
       // TODO: Fail if not all major compacted first
 
       // TODO: Set the .META. and -ROOT- to flush at 16k? 32k?
       // TODO: Enable block cache on all tables
       // TODO: Rewrite MEMCACHE_FLUSHSIZE as MEMSTORE_FLUSHSIZE -- name has changed.
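Taken together, the new pre-flight reduces to two filesystem probes before any rewriting starts. A minimal sketch of the call order the patch enforces; the driver class, its name, and the error string are illustrative, not from the commit:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

/** Hypothetical driver mirroring the patched Migrate logic. */
public class PreFlightCheck {
  public static void main(String[] args) throws IOException {
    HBaseConfiguration conf = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = new Path(conf.get(HConstants.HBASE_DIR));
    if (FSUtils.isPre020FileLayout(fs, rootdir) &&
        !FSUtils.isMajorCompactedPre020(fs, rootdir)) {
      // isPre020FileLayout probes ${hbase.rootdir}/-ROOT-/70236052/mapfiles;
      // isMajorCompactedPre020 then requires at most one file in each
      // family's 'mapfiles' directory.
      throw new IOException("Major compact all tables before migrating");
    }
  }
}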
@@ -270,33 +270,6 @@ set to control the master's address (not mandatory).
     }
   }
 
-  /**
-   * Runs through the hbase rootdir and checks all stores have only
-   * one file in them -- that is, they've been major compacted. Looks
-   * at root and meta tables too.
-   * @param fs
-   * @param c
-   * @return True if this hbase install is major compacted.
-   * @throws IOException
-   */
-  public static boolean isMajorCompacted(final FileSystem fs,
-    final HBaseConfiguration c)
-  throws IOException {
-    FileStatus [] directories =
-      fs.listStatus(new Path(c.get(HConstants.HBASE_DIR)), new PathFilter() {
-        public boolean accept(Path p) {
-          boolean isdir = false;
-          try {
-            isdir = fs.getFileStatus(p).isDir();
-          } catch (IOException e) {
-            e.printStackTrace();
-          }
-          return isdir;
-        }
-      });
-    return true;
-  }
-
   /*
    * Enable blockcaching on catalog tables.
    * @param mr
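The removed method above listed the table directories but never inspected them, so it always returned true. Its replacement, the FSUtils#isMajorCompacted(FileSystem, Path) linked from the new javadoc, presumably walks the 0.20 layout, where store files sit directly under each family directory. A sketch under that assumption, not the committed body:

// Sketch only: assumes the 0.20 layout keeps store files directly under
// each family directory; the actual FSUtils#isMajorCompacted may differ.
public static boolean isMajorCompacted(final FileSystem fs,
    final Path hbaseRootDir) throws IOException {
  // Presumes any directory under hbase.rootdir is a table.
  for (FileStatus table : fs.listStatus(hbaseRootDir, new DirFilter(fs))) {
    Path t = table.getPath();
    if (t.getName().equals(HConstants.HREGION_LOGDIR_NAME)) continue;
    for (FileStatus region : fs.listStatus(t, new DirFilter(fs))) {
      Path r = region.getPath();
      if (r.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) continue;
      for (FileStatus family : fs.listStatus(r, new DirFilter(fs))) {
        // More than one store file means this family has not been
        // major compacted.
        if (fs.listStatus(family.getPath()).length > 1) return false;
      }
    }
  }
  return true;
}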