diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 6945eb06292..ba152c06502 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -925,10 +925,9 @@ public final class HConstants {
   public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000;
   /** Directories that are not HBase table directories */
   public static final List<String> HBASE_NON_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME,
-      HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME,
-      HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY,
-      OLD_SNAPSHOT_DIR_NAME, BASE_NAMESPACE_DIR, MIGRATION_NAME, LIB_DIR}));
+    Collections.unmodifiableList(Arrays.asList(new String[] {
+      HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
+    }));
 
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index b11d74ccb9a..e34420d6874 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -251,6 +251,7 @@ public class NamespaceUpgrade implements Tool {
       // Make the new directory under the ns to which we will move the table.
       Path nsDir = new Path(this.defNsDir,
         TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
+      LOG.info("Moving " + oldTableDir + " to " + nsDir);
       if (!fs.exists(nsDir.getParent())) {
         if (!fs.mkdirs(nsDir.getParent())) {
           throw new IOException("Failed to create namespace dir "+nsDir.getParent());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
index c7aa4069856..02dfc5f6e8f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * Thread that walks over the filesystem, and computes the mappings
@@ -79,7 +80,7 @@ class FSRegionScanner implements Runnable {
       int totalBlkCount = 0;
 
       // ignore null
-      FileStatus[] cfList = fs.listStatus(regionPath);
+      FileStatus[] cfList = fs.listStatus(regionPath, new FSUtils.FamilyDirFilter(fs));
       if (null == cfList) {
         return;
       }
@@ -90,10 +91,7 @@ class FSRegionScanner implements Runnable {
           // skip because this is not a CF directory
           continue;
         }
-        if (cfStatus.getPath().getName().startsWith(".") ||
-            HConstants.HBASE_NON_USER_TABLE_DIRS.contains(cfStatus.getPath().getName())) {
-          continue;
-        }
+
         FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath());
         if (null == storeFileLists) {
           continue;
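Reviewer note: the FSRegionScanner change above replaces hand-rolled skip logic (dot-prefixed names plus the HBASE_NON_USER_TABLE_DIRS blacklist) with a PathFilter handed straight to listStatus, so filtering happens at listing time. A minimal sketch of the pattern, with a hypothetical dotDirFilter standing in for FSUtils.FamilyDirFilter:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class FilteredListing {
  // Hypothetical filter: accepts only directories whose name does not start
  // with "." -- a rough approximation of what FSUtils.FamilyDirFilter
  // enforces (real column-family directories, not metadata directories).
  static PathFilter dotDirFilter(final FileSystem fs) {
    return new PathFilter() {
      @Override
      public boolean accept(Path p) {
        try {
          return !p.getName().startsWith(".") && fs.getFileStatus(p).isDirectory();
        } catch (IOException e) {
          return false; // treat unreadable paths as non-matching
        }
      }
    };
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path regionPath = new Path(args[0]);
    // The filter is applied during the listing itself, so the caller no
    // longer needs per-entry skip logic in the loop body.
    FileStatus[] cfList = fs.listStatus(regionPath, dotDirFilter(fs));
    for (FileStatus cf : cfList) {
      System.out.println(cf.getPath());
    }
  }
}
```

Centralizing the policy in one filter keeps FSRegionScanner and FSUtils from drifting apart on what counts as a family directory.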
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3f43094cfe6..ed1a4e6aeff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -975,15 +975,14 @@ public abstract class FSUtils {
       final Path hbaseRootDir)
   throws IOException {
     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
+    PathFilter regionFilter = new RegionDirFilter(fs);
+    PathFilter familyFilter = new FamilyDirFilter(fs);
     for (Path d : tableDirs) {
-      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
+      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
-        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-          continue;
-        }
         // Else its a region name.  Now look in region for families.
-        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
+        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
         for (FileStatus familyDir : familyDirs) {
           Path family = familyDir.getPath();
           // Now in family make sure only one file.
@@ -1049,19 +1048,17 @@ public abstract class FSUtils {
     Map<String, Integer> frags = new HashMap<String, Integer>();
     int cfCountTotal = 0;
     int cfFragTotal = 0;
-    DirFilter df = new DirFilter(fs);
+    PathFilter regionFilter = new RegionDirFilter(fs);
+    PathFilter familyFilter = new FamilyDirFilter(fs);
     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
     for (Path d : tableDirs) {
       int cfCount = 0;
       int cfFrag = 0;
-      FileStatus[] regionDirs = fs.listStatus(d, df);
+      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
-        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-          continue;
-        }
         // else its a region name, now look in region for families
-        FileStatus[] familyDirs = fs.listStatus(dd, df);
+        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
         for (FileStatus familyDir : familyDirs) {
           cfCount++;
           cfCountTotal++;
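Reviewer note on the two FSUtils hunks above: the per-directory `new DirFilter(fs)` allocations are hoisted into one RegionDirFilter and one FamilyDirFilter per call, and the explicit compaction.dir skip disappears because RegionDirFilter only accepts real region directories. A sketch of why reuse is safe, assuming (as the patch does) that the filters hold nothing but the FileSystem reference:

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class FilterReuse {
  // Hypothetical stateless filter: safe to construct once and reuse across
  // every listStatus call, as with RegionDirFilter/FamilyDirFilter.
  static final class DirOnlyFilter implements PathFilter {
    private final FileSystem fs;
    DirOnlyFilter(FileSystem fs) { this.fs = fs; }
    @Override public boolean accept(Path p) {
      try {
        return fs.getFileStatus(p).isDirectory();
      } catch (IOException e) {
        return false;
      }
    }
  }

  static void walk(FileSystem fs, List<Path> tableDirs) throws IOException {
    PathFilter dirFilter = new DirOnlyFilter(fs); // constructed once per call
    for (Path d : tableDirs) {
      for (FileStatus region : fs.listStatus(d, dirFilter)) {
        // the same instance is reused for the nested listing as well
        for (FileStatus family : fs.listStatus(region.getPath(), dirFilter)) {
          System.out.println(family.getPath());
        }
      }
    }
  }
}
```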
@@ -1083,86 +1080,6 @@ public abstract class FSUtils {
     return frags;
   }
 
-  /**
-   * Expects to find -ROOT- directory.
-   * @param fs filesystem
-   * @param hbaseRootDir hbase root directory
-   * @return True if this a pre020 layout.
-   * @throws IOException e
-   */
-  public static boolean isPre020FileLayout(final FileSystem fs,
-    final Path hbaseRootDir)
-  throws IOException {
-    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
-      "70236052"), "info"), "mapfiles");
-    return fs.exists(mapfiles);
-  }
-
-  /**
-   * Runs through the hbase rootdir and checks all stores have only
-   * one file in them -- that is, they've been major compacted.  Looks
-   * at root and meta tables too.  This version differs from
-   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
-   * pre-0.20.0 hbase layout on the filesystem.  Used migrating.
-   * @param fs filesystem
-   * @param hbaseRootDir hbase root directory
-   * @return True if this hbase install is major compacted.
-   * @throws IOException e
-   */
-  public static boolean isMajorCompactedPre020(final FileSystem fs,
-      final Path hbaseRootDir)
-  throws IOException {
-    // Presumes any directory under hbase.rootdir is a table.
-    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
-    for (Path d: tableDirs) {
-      // Inside a table, there are compaction.dir directories to skip.
-      // Otherwise, all else should be regions.  Then in each region, should
-      // only be family directories.  Under each of these, should be a mapfile
-      // and info directory and in these only one file.
-      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
-        continue;
-      }
-      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
-      for (FileStatus regionDir : regionDirs) {
-        Path dd = regionDir.getPath();
-        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-          continue;
-        }
-        // Else its a region name.  Now look in region for families.
-        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
-        for (FileStatus familyDir : familyDirs) {
-          Path family = familyDir.getPath();
-          FileStatus[] infoAndMapfile = fs.listStatus(family);
-          // Assert that only info and mapfile in family dir.
-          if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
-            LOG.debug(family.toString() +
-                " has more than just info and mapfile: " + infoAndMapfile.length);
-            return false;
-          }
-          // Make sure directory named info or mapfile.
-          for (int ll = 0; ll < 2; ll++) {
-            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
-                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
-              continue;
-            LOG.debug("Unexpected directory name: " +
-                infoAndMapfile[ll].getPath());
-            return false;
-          }
-          // Now in family, there are 'mapfile' and 'info' subdirs.  Just
-          // look in the 'mapfile' subdir.
-          FileStatus[] familyStatus =
-              fs.listStatus(new Path(family, "mapfiles"));
-          if (familyStatus.length > 1) {
-            LOG.debug(family.toString() + " has " + familyStatus.length +
-                " files.");
-            return false;
-          }
-        }
-      }
-    }
-    return true;
-  }
-
   /**
    * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
    * path rootdir
@@ -1247,10 +1164,10 @@
     public boolean accept(Path p) {
       boolean isValid = false;
       try {
-        if (blacklist.contains(p.getName().toString())) {
-          isValid = false;
-        } else {
+        if (isValidName(p.getName())) {
           isValid = fs.getFileStatus(p).isDirectory();
+        } else {
+          isValid = false;
         }
       } catch (IOException e) {
         LOG.warn("An error occurred while verifying if [" + p.toString()
@@ -1258,6 +1175,10 @@
       }
       return isValid;
     }
+
+    protected boolean isValidName(final String name) {
+      return !blacklist.contains(name);
+    }
   }
 
   /**
@@ -1275,10 +1196,22 @@
    * {@link BlackListDirFilter} with a null blacklist
    */
   public static class UserTableDirFilter extends BlackListDirFilter {
-
     public UserTableDirFilter(FileSystem fs) {
       super(fs, HConstants.HBASE_NON_TABLE_DIRS);
     }
+
+    protected boolean isValidName(final String name) {
+      if (!super.isValidName(name))
+        return false;
+
+      try {
+        TableName.isLegalTableQualifierName(Bytes.toBytes(name));
+      } catch (IllegalArgumentException e) {
+        LOG.info("INVALID NAME " + name);
+        return false;
+      }
+      return true;
+    }
   }
 
   /**
@@ -1539,15 +1472,12 @@ public abstract class FSUtils {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     // Inside a table, there are compaction.dir directories to skip.  Otherwise, all else
     // should be regions.
-    PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
-    FileStatus[] regionDirs = fs.listStatus(tableDir);
+    PathFilter familyFilter = new FamilyDirFilter(fs);
+    FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
     for (FileStatus regionDir : regionDirs) {
       Path dd = regionDir.getPath();
-      if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-        continue;
-      }
       // else its a region name, now look in region for families
-      FileStatus[] familyDirs = fs.listStatus(dd, df);
+      FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
       for (FileStatus familyDir : familyDirs) {
         Path family = familyDir.getPath();
         // now in family, iterate over the StoreFiles and
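Reviewer note: the BlackListDirFilter/UserTableDirFilter hunks above introduce a template method: accept() delegates name checking to an overridable isValidName(), and UserTableDirFilter layers TableName.isLegalTableQualifierName on top of the blacklist. A standalone sketch of the pattern, with hypothetical DirectoryNameFilter/QualifierNameFilter classes and a simplified regex standing in for the real qualifier rules:

```java
import java.util.Arrays;
import java.util.Collection;

class DirectoryNameFilter {
  private final Collection<String> blacklist;

  DirectoryNameFilter(Collection<String> blacklist) {
    this.blacklist = blacklist;
  }

  // Subclasses override this hook to layer extra validity rules on top of
  // the blacklist check, the way UserTableDirFilter does.
  protected boolean isValidName(String name) {
    return !blacklist.contains(name);
  }
}

class QualifierNameFilter extends DirectoryNameFilter {
  QualifierNameFilter(Collection<String> blacklist) {
    super(blacklist);
  }

  @Override
  protected boolean isValidName(String name) {
    if (!super.isValidName(name)) {
      return false;
    }
    // Approximation of TableName.isLegalTableQualifierName: word characters,
    // dots, and dashes only, and the name may not start with '.' or '-'.
    return !name.isEmpty() && name.matches("[a-zA-Z_0-9][a-zA-Z_0-9.-]*");
  }
}

public class FilterDemo {
  public static void main(String[] args) {
    DirectoryNameFilter f = new QualifierNameFilter(Arrays.asList(".tmp"));
    System.out.println(f.isValidName("foo"));      // true: plausible table dir
    System.out.println(f.isValidName(".tmp"));     // false: blacklisted
    System.out.println(f.isValidName(".oldlogs")); // false: illegal qualifier
  }
}
```

This is presumably what lets HBASE_NON_TABLE_DIRS shrink in the first hunk: dot-prefixed system directories already fail the qualifier check, so most of the old blacklist entries become redundant.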
diff --git a/hbase-server/src/test/data/TestNamespaceUpgrade.tgz b/hbase-server/src/test/data/TestNamespaceUpgrade.tgz
index db6e9c35441..bd91ba2fc98 100644
Binary files a/hbase-server/src/test/data/TestNamespaceUpgrade.tgz and b/hbase-server/src/test/data/TestNamespaceUpgrade.tgz differ
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
index d0b218867e9..046290d7ab4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
@@ -94,7 +94,8 @@ public class TestNamespaceUpgrade {
   private final static String currentKeys[] =
     {"1","2","3","4","5","6","7","8","9","A"};
   private final static TableName tables[] =
-    {TableName.valueOf("foo"), TableName.valueOf("ns1.foo"), TableName.valueOf("ns.two.foo")};
+    {TableName.valueOf("data"), TableName.valueOf("foo"),
+     TableName.valueOf("ns1.foo"), TableName.valueOf("ns.two.foo")};
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index aa1617734af..cbe80166e06 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -260,11 +260,12 @@ public class TestFSTableDescriptors {
   }
 
   @Test
-  public void testReadingArchiveDirectoryFromFS() throws IOException {
+  public void testReadingInvalidDirectoryFromFS() throws IOException {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     try {
+      // .tmp dir is an invalid table name
      new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
-          .get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY));
+          .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY));
       fail("Shouldn't be able to read a table descriptor for the archive directory.");
     } catch (Exception e) {
       LOG.debug("Correctly got error when reading a table descriptor from the archive directory: "
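Usage note: the renamed test keeps the try/fail/catch idiom for asserting that FSTableDescriptors refuses invalid directory names. The same expectation in isolation, with a hypothetical stand-in operation in place of the real FSTableDescriptors.get call shown above:

```java
import static org.junit.Assert.fail;
import org.junit.Test;

public class ExpectedFailureIdiom {
  // Hypothetical operation standing in for reading a table descriptor for an
  // invalid directory such as ".tmp"; it must throw for the test to pass.
  private void readDescriptorForInvalidDir() {
    throw new IllegalArgumentException("Illegal qualifier: .tmp");
  }

  @Test
  public void rejectsInvalidDirectory() {
    try {
      readDescriptorForInvalidDir();
      fail("Shouldn't be able to read a table descriptor for an invalid directory.");
    } catch (IllegalArgumentException e) {
      // expected: invalid names must not resolve to table descriptors
    }
  }
}
```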