HBASE-12054 bad state after NamespaceUpgrade with reserved table names

This commit is contained in:
Matteo Bertozzi 2014-09-24 19:39:52 +01:00
parent bcee3609dd
commit 9152d8677e
5 changed files with 41 additions and 112 deletions

View File

@@ -925,10 +925,9 @@ public final class HConstants {
public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000;
/** Directories that are not HBase table directories */
public static final List<String> HBASE_NON_TABLE_DIRS =
Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME,
HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME,
HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY,
OLD_SNAPSHOT_DIR_NAME, BASE_NAMESPACE_DIR, MIGRATION_NAME, LIB_DIR}));
Collections.unmodifiableList(Arrays.asList(new String[] {
HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
}));
/** Directories that are not HBase user table directories */
public static final List<String> HBASE_NON_USER_TABLE_DIRS =

View File

@@ -253,6 +253,7 @@ public class NamespaceUpgrade implements Tool {
// Make the new directory under the ns to which we will move the table.
Path nsDir = new Path(this.defNsDir,
TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
LOG.info("Moving " + oldTableDir + " to " + nsDir);
if (!fs.exists(nsDir.getParent())) {
if (!fs.mkdirs(nsDir.getParent())) {
throw new IOException("Failed to create namespace dir "+nsDir.getParent());

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;
/**
* Thread that walks over the filesystem, and computes the mappings
@@ -79,7 +80,7 @@ class FSRegionScanner implements Runnable {
int totalBlkCount = 0;
// ignore null
FileStatus[] cfList = fs.listStatus(regionPath);
FileStatus[] cfList = fs.listStatus(regionPath, new FSUtils.FamilyDirFilter(fs));
if (null == cfList) {
return;
}
@@ -90,10 +91,7 @@
// skip because this is not a CF directory
continue;
}
if (cfStatus.getPath().getName().startsWith(".") ||
HConstants.HBASE_NON_USER_TABLE_DIRS.contains(cfStatus.getPath().getName())) {
continue;
}
FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath());
if (null == storeFileLists) {
continue;

View File

@@ -976,15 +976,14 @@
final Path hbaseRootDir)
throws IOException {
List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
PathFilter regionFilter = new RegionDirFilter(fs);
PathFilter familyFilter = new FamilyDirFilter(fs);
for (Path d : tableDirs) {
FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
continue;
}
// Else its a region name. Now look in region for families.
FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
for (FileStatus familyDir : familyDirs) {
Path family = familyDir.getPath();
// Now in family make sure only one file.
@@ -1050,19 +1049,17 @@
Map<String, Integer> frags = new HashMap<String, Integer>();
int cfCountTotal = 0;
int cfFragTotal = 0;
DirFilter df = new DirFilter(fs);
PathFilter regionFilter = new RegionDirFilter(fs);
PathFilter familyFilter = new FamilyDirFilter(fs);
List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
for (Path d : tableDirs) {
int cfCount = 0;
int cfFrag = 0;
FileStatus[] regionDirs = fs.listStatus(d, df);
FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
continue;
}
// else its a region name, now look in region for families
FileStatus[] familyDirs = fs.listStatus(dd, df);
FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
for (FileStatus familyDir : familyDirs) {
cfCount++;
cfCountTotal++;
@@ -1084,86 +1081,6 @@
return frags;
}
/**
* Expects to find -ROOT- directory.
* @param fs filesystem
* @param hbaseRootDir hbase root directory
* @return True if this a pre020 layout.
* @throws IOException e
*/
public static boolean isPre020FileLayout(final FileSystem fs,
final Path hbaseRootDir)
throws IOException {
Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
"70236052"), "info"), "mapfiles");
return fs.exists(mapfiles);
}
/**
* Runs through the hbase rootdir and checks all stores have only
* one file in them -- that is, they've been major compacted. Looks
* at root and meta tables too. This version differs from
* {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
* pre-0.20.0 hbase layout on the filesystem. Used migrating.
* @param fs filesystem
* @param hbaseRootDir hbase root directory
* @return True if this hbase install is major compacted.
* @throws IOException e
*/
public static boolean isMajorCompactedPre020(final FileSystem fs,
final Path hbaseRootDir)
throws IOException {
// Presumes any directory under hbase.rootdir is a table.
List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
for (Path d: tableDirs) {
// Inside a table, there are compaction.dir directories to skip.
// Otherwise, all else should be regions. Then in each region, should
// only be family directories. Under each of these, should be a mapfile
// and info directory and in these only one file.
if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
continue;
}
FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
continue;
}
// Else its a region name. Now look in region for families.
FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
for (FileStatus familyDir : familyDirs) {
Path family = familyDir.getPath();
FileStatus[] infoAndMapfile = fs.listStatus(family);
// Assert that only info and mapfile in family dir.
if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
LOG.debug(family.toString() +
" has more than just info and mapfile: " + infoAndMapfile.length);
return false;
}
// Make sure directory named info or mapfile.
for (int ll = 0; ll < 2; ll++) {
if (infoAndMapfile[ll].getPath().getName().equals("info") ||
infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
continue;
LOG.debug("Unexpected directory name: " +
infoAndMapfile[ll].getPath());
return false;
}
// Now in family, there are 'mapfile' and 'info' subdirs. Just
// look in the 'mapfile' subdir.
FileStatus[] familyStatus =
fs.listStatus(new Path(family, "mapfiles"));
if (familyStatus.length > 1) {
LOG.debug(family.toString() + " has " + familyStatus.length +
" files.");
return false;
}
}
}
}
return true;
}
/**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
* path rootdir
@@ -1248,10 +1165,10 @@
public boolean accept(Path p) {
boolean isValid = false;
try {
if (blacklist.contains(p.getName().toString())) {
isValid = false;
} else {
if (isValidName(p.getName())) {
isValid = fs.getFileStatus(p).isDirectory();
} else {
isValid = false;
}
} catch (IOException e) {
LOG.warn("An error occurred while verifying if [" + p.toString()
@@ -1259,6 +1176,10 @@
}
return isValid;
}
protected boolean isValidName(final String name) {
return !blacklist.contains(name);
}
}
/**
@@ -1276,10 +1197,22 @@
* {@link BlackListDirFilter} with a <tt>null</tt> blacklist
*/
public static class UserTableDirFilter extends BlackListDirFilter {
public UserTableDirFilter(FileSystem fs) {
super(fs, HConstants.HBASE_NON_TABLE_DIRS);
}
protected boolean isValidName(final String name) {
if (!super.isValidName(name))
return false;
try {
TableName.isLegalTableQualifierName(Bytes.toBytes(name));
} catch (IllegalArgumentException e) {
LOG.info("INVALID NAME " + name);
return false;
}
return true;
}
}
/**
@@ -1540,15 +1473,12 @@
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
// Inside a table, there are compaction.dir directories to skip. Otherwise, all else
// should be regions.
PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
FileStatus[] regionDirs = fs.listStatus(tableDir);
PathFilter familyFilter = new FamilyDirFilter(fs);
FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
for (FileStatus regionDir : regionDirs) {
Path dd = regionDir.getPath();
if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
continue;
}
// else its a region name, now look in region for families
FileStatus[] familyDirs = fs.listStatus(dd, df);
FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
for (FileStatus familyDir : familyDirs) {
Path family = familyDir.getPath();
// now in family, iterate over the StoreFiles and

View File

@@ -301,11 +301,12 @@ public class TestFSTableDescriptors {
}
@Test
public void testReadingArchiveDirectoryFromFS() throws IOException {
public void testReadingInvalidDirectoryFromFS() throws IOException {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
try {
// .tmp dir is an invalid table name
new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
.get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY));
.get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY));
fail("Shouldn't be able to read a table descriptor for the archive directory.");
} catch (Exception e) {
LOG.debug("Correctly got error when reading a table descriptor from the archive directory: "