HBASE-12054 bad state after NamespaceUpgrade with reserved table names
parent bcee3609dd
commit 9152d8677e
@@ -925,10 +925,9 @@ public final class HConstants {
   public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000;
   /** Directories that are not HBase table directories */
   public static final List<String> HBASE_NON_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME,
-      HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME,
-      HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY,
-      OLD_SNAPSHOT_DIR_NAME, BASE_NAMESPACE_DIR, MIGRATION_NAME, LIB_DIR}));
+    Collections.unmodifiableList(Arrays.asList(new String[] {
+      HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
+    }));
 
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
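Note: the pruned HBASE_NON_TABLE_DIRS keeps only names that can never be user tables under any layout; archive, snapshot, namespace and lib directories are instead rejected by the name-validating filters later in this patch. A standalone sketch of how such a blacklist is consumed (not HBase code; the literal values ".hbck", ".tmp" and ".migration" are assumed to match the three surviving constants):

    import java.util.Arrays;
    import java.util.List;

    public class BlacklistSketch {
      // Assumed literal values of HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME.
      static final List<String> NON_TABLE_DIRS = Arrays.asList(".hbck", ".tmp", ".migration");

      static boolean couldBeTableDir(String name) {
        // A blacklisted name can never be a table directory.
        return !NON_TABLE_DIRS.contains(name);
      }

      public static void main(String[] args) {
        System.out.println(couldBeTableDir(".tmp"));    // false: reserved
        System.out.println(couldBeTableDir("usertab")); // true: candidate table dir
      }
    }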
@@ -253,6 +253,7 @@ public class NamespaceUpgrade implements Tool {
     // Make the new directory under the ns to which we will move the table.
     Path nsDir = new Path(this.defNsDir,
       TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
+    LOG.info("Moving " + oldTableDir + " to " + nsDir);
     if (!fs.exists(nsDir.getParent())) {
       if (!fs.mkdirs(nsDir.getParent())) {
         throw new IOException("Failed to create namespace dir "+nsDir.getParent());
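For orientation, the new log line sits in the loop that relocates each legacy table directory under the default namespace. A condensed sketch of that step; oldTableDir, defNsDir and fs mirror the hunk's context, while the fs.rename call is an assumption about what follows, not something shown in this hunk:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;

    class TableMoveSketch {
      static void moveToNamespace(FileSystem fs, Path oldTableDir, Path defNsDir)
          throws IOException {
        Path nsDir = new Path(defNsDir,
            TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
        if (!fs.exists(nsDir.getParent()) && !fs.mkdirs(nsDir.getParent())) {
          throw new IOException("Failed to create namespace dir " + nsDir.getParent());
        }
        if (!fs.rename(oldTableDir, nsDir)) {  // assumed follow-on step
          throw new IOException("Failed to move " + oldTableDir + " to " + nsDir);
        }
      }
    }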
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * Thread that walks over the filesystem, and computes the mappings
@@ -79,7 +80,7 @@ class FSRegionScanner implements Runnable {
       int totalBlkCount = 0;
 
       // ignore null
-      FileStatus[] cfList = fs.listStatus(regionPath);
+      FileStatus[] cfList = fs.listStatus(regionPath, new FSUtils.FamilyDirFilter(fs));
       if (null == cfList) {
         return;
       }
@@ -90,10 +91,7 @@ class FSRegionScanner implements Runnable {
           // skip because this is not a CF directory
           continue;
         }
-        if (cfStatus.getPath().getName().startsWith(".") ||
-            HConstants.HBASE_NON_USER_TABLE_DIRS.contains(cfStatus.getPath().getName())) {
-          continue;
-        }
         FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath());
         if (null == storeFileLists) {
           continue;
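Both FSRegionScanner hunks replace ad-hoc name tests (leading dot, HBASE_NON_USER_TABLE_DIRS membership) with FSUtils.FamilyDirFilter, so the listing itself only returns plausible column-family directories. A simplified stand-in for the contract such a filter provides (illustrative only, not the real FSUtils.FamilyDirFilter):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    class FamilyDirFilterSketch implements PathFilter {
      private final FileSystem fs;
      FamilyDirFilterSketch(FileSystem fs) { this.fs = fs; }

      @Override
      public boolean accept(Path p) {
        // Family names never start with '.', so .regioninfo, .tmp etc. are skipped.
        if (p.getName().startsWith(".")) return false;
        try {
          return fs.getFileStatus(p).isDirectory();
        } catch (IOException e) {
          return false; // treat unreadable entries as non-family dirs
        }
      }
    }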
@@ -976,15 +976,14 @@ public abstract class FSUtils {
       final Path hbaseRootDir)
   throws IOException {
     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
+    PathFilter regionFilter = new RegionDirFilter(fs);
+    PathFilter familyFilter = new FamilyDirFilter(fs);
     for (Path d : tableDirs) {
-      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
+      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
-        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-          continue;
-        }
         // Else its a region name. Now look in region for families.
-        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
+        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
         for (FileStatus familyDir : familyDirs) {
           Path family = familyDir.getPath();
           // Now in family make sure only one file.
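Note the shape shared by this hunk and the next: the PathFilter instances are built once before the walk and reused at every level, and the filters absorb the compaction-dir check the old code repeated inline. An outline of the refactored walk (illustrative method; the filter arguments stand in for FSUtils' RegionDirFilter and FamilyDirFilter):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    class FilterReuseSketch {
      // Filters constructed once by the caller, reused across the whole walk.
      static void walk(FileSystem fs, List<Path> tableDirs,
          PathFilter regionFilter, PathFilter familyFilter) throws IOException {
        for (Path table : tableDirs) {
          for (FileStatus region : fs.listStatus(table, regionFilter)) {
            for (FileStatus family : fs.listStatus(region.getPath(), familyFilter)) {
              // visit store files under family.getPath()
            }
          }
        }
      }
    }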
@@ -1050,19 +1049,17 @@ public abstract class FSUtils {
     Map<String, Integer> frags = new HashMap<String, Integer>();
     int cfCountTotal = 0;
     int cfFragTotal = 0;
-    DirFilter df = new DirFilter(fs);
+    PathFilter regionFilter = new RegionDirFilter(fs);
+    PathFilter familyFilter = new FamilyDirFilter(fs);
     List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
     for (Path d : tableDirs) {
       int cfCount = 0;
       int cfFrag = 0;
-      FileStatus[] regionDirs = fs.listStatus(d, df);
+      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
       for (FileStatus regionDir : regionDirs) {
         Path dd = regionDir.getPath();
-        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-          continue;
-        }
         // else its a region name, now look in region for families
-        FileStatus[] familyDirs = fs.listStatus(dd, df);
+        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
         for (FileStatus familyDir : familyDirs) {
           cfCount++;
           cfCountTotal++;
@@ -1084,86 +1081,6 @@ public abstract class FSUtils {
     return frags;
   }
 
-  /**
-   * Expects to find -ROOT- directory.
-   * @param fs filesystem
-   * @param hbaseRootDir hbase root directory
-   * @return True if this a pre020 layout.
-   * @throws IOException e
-   */
-  public static boolean isPre020FileLayout(final FileSystem fs,
-      final Path hbaseRootDir)
-  throws IOException {
-    Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"),
-      "70236052"), "info"), "mapfiles");
-    return fs.exists(mapfiles);
-  }
-
-  /**
-   * Runs through the hbase rootdir and checks all stores have only
-   * one file in them -- that is, they've been major compacted. Looks
-   * at root and meta tables too. This version differs from
-   * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a
-   * pre-0.20.0 hbase layout on the filesystem. Used migrating.
-   * @param fs filesystem
-   * @param hbaseRootDir hbase root directory
-   * @return True if this hbase install is major compacted.
-   * @throws IOException e
-   */
-  public static boolean isMajorCompactedPre020(final FileSystem fs,
-      final Path hbaseRootDir)
-  throws IOException {
-    // Presumes any directory under hbase.rootdir is a table.
-    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
-    for (Path d: tableDirs) {
-      // Inside a table, there are compaction.dir directories to skip.
-      // Otherwise, all else should be regions. Then in each region, should
-      // only be family directories. Under each of these, should be a mapfile
-      // and info directory and in these only one file.
-      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
-        continue;
-      }
-      FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs));
-      for (FileStatus regionDir : regionDirs) {
-        Path dd = regionDir.getPath();
-        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-          continue;
-        }
-        // Else its a region name. Now look in region for families.
-        FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs));
-        for (FileStatus familyDir : familyDirs) {
-          Path family = familyDir.getPath();
-          FileStatus[] infoAndMapfile = fs.listStatus(family);
-          // Assert that only info and mapfile in family dir.
-          if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) {
-            LOG.debug(family.toString() +
-                " has more than just info and mapfile: " + infoAndMapfile.length);
-            return false;
-          }
-          // Make sure directory named info or mapfile.
-          for (int ll = 0; ll < 2; ll++) {
-            if (infoAndMapfile[ll].getPath().getName().equals("info") ||
-                infoAndMapfile[ll].getPath().getName().equals("mapfiles"))
-              continue;
-            LOG.debug("Unexpected directory name: " +
-                infoAndMapfile[ll].getPath());
-            return false;
-          }
-          // Now in family, there are 'mapfile' and 'info' subdirs. Just
-          // look in the 'mapfile' subdir.
-          FileStatus[] familyStatus =
-              fs.listStatus(new Path(family, "mapfiles"));
-          if (familyStatus.length > 1) {
-            LOG.debug(family.toString() + " has " + familyStatus.length +
-              " files.");
-            return false;
-          }
-        }
-      }
-    }
-    return true;
-  }
-
   /**
    * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
    * path rootdir
@@ -1248,10 +1165,10 @@ public abstract class FSUtils {
     public boolean accept(Path p) {
       boolean isValid = false;
       try {
-        if (blacklist.contains(p.getName().toString())) {
-          isValid = false;
-        } else {
+        if (isValidName(p.getName())) {
           isValid = fs.getFileStatus(p).isDirectory();
+        } else {
+          isValid = false;
         }
       } catch (IOException e) {
         LOG.warn("An error occurred while verifying if [" + p.toString()
@@ -1259,6 +1176,10 @@ public abstract class FSUtils {
       }
       return isValid;
     }
+
+    protected boolean isValidName(final String name) {
+      return !blacklist.contains(name);
+    }
   }
 
   /**
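Together with the accept() rewrite above, this turns BlackListDirFilter into a small template method: accept() is fixed, and isValidName() is the hook a subclass overrides to tighten validity without reimplementing the directory check. A self-contained illustration of the pattern (all names invented for the sketch):

    import java.util.Collections;
    import java.util.List;

    class DirFilterSketch {
      private final List<String> blacklist;
      DirFilterSketch(List<String> blacklist) {
        this.blacklist = blacklist == null ? Collections.<String>emptyList() : blacklist;
      }

      // Fixed skeleton: validity check first, then the directory test.
      public boolean accept(String name, boolean isDirectory) {
        return isValidName(name) && isDirectory;
      }

      // Hook: the default rule is just the blacklist.
      protected boolean isValidName(String name) {
        return !blacklist.contains(name);
      }
    }

    class UserTableSketch extends DirFilterSketch {
      UserTableSketch(List<String> blacklist) { super(blacklist); }

      @Override
      protected boolean isValidName(String name) {
        // Tighten: also reject names no table could legally have.
        return super.isValidName(name) && !name.startsWith(".") && !name.startsWith("-");
      }
    }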
@@ -1276,10 +1197,22 @@ public abstract class FSUtils {
    * {@link BlackListDirFilter} with a <tt>null</tt> blacklist
    */
   public static class UserTableDirFilter extends BlackListDirFilter {
     public UserTableDirFilter(FileSystem fs) {
       super(fs, HConstants.HBASE_NON_TABLE_DIRS);
     }
 
+    protected boolean isValidName(final String name) {
+      if (!super.isValidName(name))
+        return false;
+
+      try {
+        TableName.isLegalTableQualifierName(Bytes.toBytes(name));
+      } catch (IllegalArgumentException e) {
+        LOG.info("INVALID NAME " + name);
+        return false;
+      }
+      return true;
+    }
   }
 
   /**
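TableName.isLegalTableQualifierName signals rejection by throwing IllegalArgumentException rather than returning false, which is why the override wraps it in try/catch. The same call reduced to a boolean helper (sketch, not part of the patch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    class QualifierCheckSketch {
      static boolean isLegalQualifier(String name) {
        try {
          // Throws IllegalArgumentException on an illegal qualifier such as ".tmp".
          TableName.isLegalTableQualifierName(Bytes.toBytes(name));
          return true;
        } catch (IllegalArgumentException e) {
          return false;
        }
      }
    }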
@@ -1540,15 +1473,12 @@ public abstract class FSUtils {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     // Inside a table, there are compaction.dir directories to skip. Otherwise, all else
     // should be regions.
-    PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS);
-    FileStatus[] regionDirs = fs.listStatus(tableDir);
+    PathFilter familyFilter = new FamilyDirFilter(fs);
+    FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
     for (FileStatus regionDir : regionDirs) {
       Path dd = regionDir.getPath();
-      if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
-        continue;
-      }
       // else its a region name, now look in region for families
-      FileStatus[] familyDirs = fs.listStatus(dd, df);
+      FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
       for (FileStatus familyDir : familyDirs) {
         Path family = familyDir.getPath();
         // now in family, iterate over the StoreFiles and
@@ -301,11 +301,12 @@ public class TestFSTableDescriptors {
   }
 
   @Test
-  public void testReadingArchiveDirectoryFromFS() throws IOException {
+  public void testReadingInvalidDirectoryFromFS() throws IOException {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     try {
+      // .tmp dir is an invalid table name
      new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
-          .get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY));
+          .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY));
       fail("Shouldn't be able to read a table descriptor for the archive directory.");
     } catch (Exception e) {
       LOG.debug("Correctly got error when reading a table descriptor from the archive directory: "
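The renamed test leans on the fact that HBASE_TEMP_DIRECTORY (".tmp") can never be a legal table qualifier, since qualifiers may not begin with a dot. The same expectation in isolation (hedged JUnit-style sketch, not part of the patch):

    import static org.junit.Assert.fail;
    import org.apache.hadoop.hbase.TableName;
    import org.junit.Test;

    public class TempDirNameSketchTest {
      @Test
      public void tmpIsNotALegalTableName() {
        try {
          TableName.valueOf(".tmp");  // same reserved name the patched test uses
          fail("'.tmp' should not parse as a table name");
        } catch (IllegalArgumentException expected) {
          // expected: table qualifiers may not begin with '.'
        }
      }
    }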