HBASE-8778 Region assignments scan table directory making them slow for huge tables

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1510977 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-08-06 14:43:09 +00:00
parent f06af44932
commit 10a25c52cf
23 changed files with 597 additions and 391 deletions

View File

@@ -452,10 +452,14 @@ public class MasterFileSystem {
// Make sure the meta region directory exists!
if (!FSUtils.metaRegionExists(fs, rd)) {
bootstrap(rd, c);
} else {
// Migrate table descriptor files if necessary
org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
.migrateFSTableDescriptorsIfNecessary(fs, rd);
}
// Create tableinfo-s for META if not already there.
FSTableDescriptors.createTableDescriptor(fs, rd, HTableDescriptor.META_TABLEDESC, false);
new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);
return rd;
}
@@ -491,7 +495,7 @@ public class MasterFileSystem {
LOG.info("BOOTSTRAP: creating META region");
try {
// Bootstrapping, make sure blockcache is off. Else, one will be
// created here in bootstap and it'll need to be cleaned up. Better to
// created here in bootstrap and it'll need to be cleaned up. Better to
// not make it in first place. Turn off block caching for bootstrap.
// Enable after.
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
@@ -589,16 +593,6 @@ public class MasterFileSystem {
}
}
/**
* Create new HTableDescriptor in HDFS.
*
* @param htableDescriptor
*/
public void createTableDescriptor(HTableDescriptor htableDescriptor)
throws IOException {
FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
}
/**
* Delete column of a table
* @param tableName

View File

@@ -202,8 +202,9 @@ public class CreateTableHandler extends EventHandler {
FileSystem fs = fileSystemManager.getFileSystem();
// 1. Create Table Descriptor
FSTableDescriptors.createTableDescriptor(fs, tempdir, this.hTableDescriptor);
Path tempTableDir = new Path(tempdir, tableName);
new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
tempTableDir, this.hTableDescriptor, false);
Path tableDir = new Path(fileSystemManager.getRootDir(), tableName);
// 2. Create Regions

View File

@@ -131,7 +131,7 @@ public final class MasterSnapshotVerifier {
* @param snapshotDir snapshot directory to check
*/
private void verifyTableInfo(Path snapshotDir) throws IOException {
FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
}
/**

View File

@@ -667,7 +667,8 @@ public class SnapshotManager implements Stoppable {
// read snapshot information
SnapshotDescription fsSnapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
HTableDescriptor snapshotTableDesc = FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
HTableDescriptor snapshotTableDesc =
FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
String tableName = reqSnapshot.getTable();
// stop tracking "abandoned" handlers

View File

@@ -51,7 +51,6 @@ import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -113,12 +112,12 @@ public class CompactionTool extends Configured implements Tool {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
@@ -130,7 +129,7 @@
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
throws IOException {
HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
compactRegion(tableDir, htd, regionDir, compactOnce, major);
}

View File

@@ -317,7 +317,7 @@ public final class SnapshotInfo extends Configured implements Tool {
}
snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
snapshotTableDesc = FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
snapshotTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
return true;
}

View File

@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
/**
@@ -61,12 +60,14 @@ public class TableInfoCopyTask extends SnapshotTask {
LOG.debug("Attempting to copy table info for snapshot:"
+ ClientSnapshotDescriptionUtils.toString(this.snapshot));
// get the HTable descriptor
HTableDescriptor orig = FSTableDescriptors.getTableDescriptor(fs, rootDir,
Bytes.toBytes(this.snapshot.getTable()));
HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
this.snapshot.getTable());
this.rethrowException();
// write a copy of descriptor to the snapshot directory
Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
FSTableDescriptors.createTableDescriptorForTableDirectory(fs, snapshotDir, orig, false);
new FSTableDescriptors(fs, rootDir)
.createTableDescriptorForTableDirectory(snapshotDir, orig, false);
LOG.debug("Finished copying tableinfo.");
return null;
}

View File

@@ -0,0 +1,137 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
/**
* A class to migrate table descriptor files to a dedicated subdir.
* Invoked by HMaster.finishInitialization before accessing table descriptors.
* Migrates snapshots, user tables, and system tables.
*
* @deprecated will be removed for the major release after 0.96.
*/
@Deprecated
public class FSTableDescriptorMigrationToSubdir {
private static final Log LOG = LogFactory.getLog(FSTableDescriptorMigrationToSubdir.class);
public static void migrateFSTableDescriptorsIfNecessary(FileSystem fs, Path rootDir)
throws IOException {
if (needsMigration(fs, rootDir)) {
migrateFsTableDescriptors(fs, rootDir);
LOG.info("Migration complete.");
}
}
/**
* Determines if migration is required by checking to see whether the META table has been
* migrated.
*/
private static boolean needsMigration(FileSystem fs, Path rootDir) throws IOException {
Path metaTableDir = FSTableDescriptors.getTableDirectory(rootDir,
Bytes.toString(HConstants.META_TABLE_NAME));
FileStatus metaTableInfoStatus =
FSTableDescriptors.getTableInfoPath(fs, metaTableDir);
return metaTableInfoStatus == null;
}
/**
* Migrates all snapshots, user tables and system tables that require migration.
* First migrates snapshots.
* Then migrates each user table in order,
* then attempts -ROOT- (which should already be gone),
* and migrates .META. last to indicate that migration is complete.
*/
private static void migrateFsTableDescriptors(FileSystem fs, Path rootDir) throws IOException {
// First migrate snapshots - will migrate any snapshot dir that contains a table info file
Path snapshotsDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
if (fs.exists(snapshotsDir)) {
LOG.info("Migrating snapshots");
FileStatus[] snapshots = fs.listStatus(snapshotsDir,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
for (FileStatus snapshot : snapshots) {
migrateTable(fs, snapshot.getPath());
}
}
LOG.info("Migrating user tables");
List<Path> userTableDirs = FSUtils.getTableDirs(fs, rootDir);
for (Path userTableDir : userTableDirs) {
migrateTable(fs, userTableDir);
}
LOG.info("Migrating system tables");
migrateTableIfExists(fs, rootDir, HConstants.ROOT_TABLE_NAME);
// migrate meta last because that's what we check to see if migration is complete
migrateTableIfExists(fs, rootDir, HConstants.META_TABLE_NAME);
}
private static void migrateTableIfExists(FileSystem fs, Path rootDir, byte[] tableName)
throws IOException {
Path tableDir = FSTableDescriptors.getTableDirectory(rootDir, Bytes.toString(tableName));
if (fs.exists(tableDir)) {
migrateTable(fs, tableDir);
}
}
/**
* Migrates table info files.
* Moves the latest table info file (if present) from the table dir to the table info subdir.
* Removes any older table info files from the table dir and any existing table info subdir.
*/
private static void migrateTable(FileSystem fs, Path tableDir) throws IOException {
FileStatus oldTableStatus = FSTableDescriptors.getCurrentTableInfoStatus(fs, tableDir, true);
if (oldTableStatus == null) {
LOG.debug("No table info file to migrate for " + tableDir);
return;
}
Path tableInfoDir = new Path(tableDir, FSTableDescriptors.TABLEINFO_DIR);
// remove table info subdir if it already exists
boolean removedExistingSubdir = FSUtils.deleteDirectory(fs, tableInfoDir);
if (removedExistingSubdir) {
LOG.info("Removed existing subdir at: " + tableInfoDir);
}
boolean createdSubdir = fs.mkdirs(tableInfoDir);
if (!createdSubdir) {
throw new IOException("Unable to create new table info directory: " + tableInfoDir);
}
Path oldTableInfoPath = oldTableStatus.getPath();
Path newTableInfoPath = new Path(tableInfoDir, oldTableInfoPath.getName());
boolean renamedInfoFile = fs.rename(oldTableInfoPath, newTableInfoPath);
if (!renamedInfoFile) {
throw new IOException("Failed to move table info file from old location: "
+ oldTableInfoPath + " to new location: " + newTableInfoPath);
}
LOG.info("Migrated table info from: " + oldTableInfoPath
+ " to new location: " + newTableInfoPath);
}
}
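The class above is the core of the fix: the old code located a descriptor by listing the entire table directory -- every region subdirectory of a huge table -- through a .tableinfo path filter, while the new layout confines that listing to a small .tabledesc subdir. A sketch of the layout change and the invocation, with an invented table name for illustration:

  // Before migration (descriptor file sits beside all the region dirs):
  //   /hbase/myTable/.tableinfo.0000000003
  //   /hbase/myTable/<thousands of region dirs>
  // After migration (only the tiny .tabledesc subdir needs to be listed):
  //   /hbase/myTable/.tabledesc/.tableinfo.0000000003
  //   /hbase/myTable/<thousands of region dirs>
  FSTableDescriptorMigrationToSubdir.migrateFSTableDescriptorsIfNecessary(fs, rootDir);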

View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
@@ -39,25 +38,27 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.primitives.Ints;
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
* passed filesystem. It expects descriptors to be in a file under the
* table's directory in FS. Can be read-only -- i.e. does not modify
* the filesystem or can be read and write.
* passed filesystem. It expects descriptors to be in a file in the
* {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only
-- i.e. does not modify the filesystem -- or read-and-write.
*
* <p>Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept under the table directory in the filesystem.
* It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
* The table schema file is kept in the {@link #TABLEINFO_DIR} subdir
* of the table directory in the filesystem.
* It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the
* edit sequenceid: e.g. <code>.tableinfo.0000000003</code>. This sequenceid
* is always increasing. It starts at zero. The table schema file with the
* highest sequenceid has the most recent schema edit. Usually there is one file
@@ -72,27 +73,29 @@ public class FSTableDescriptors implements TableDescriptors {
private final FileSystem fs;
private final Path rootdir;
private final boolean fsreadonly;
long cachehits = 0;
long invocations = 0;
@VisibleForTesting long cachehits = 0;
@VisibleForTesting long invocations = 0;
/** The file name used to store HTD in HDFS */
public static final String TABLEINFO_NAME = ".tableinfo";
/** The file name prefix used to store HTD in HDFS */
static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
static final String TABLEINFO_DIR = ".tabledesc";
static final String TMP_DIR = ".tmp";
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
private final Map<String, TableDescriptorModtime> cache =
new ConcurrentHashMap<String, TableDescriptorModtime>();
private final Map<String, TableDescriptorAndModtime> cache =
new ConcurrentHashMap<String, TableDescriptorAndModtime>();
/**
* Data structure to hold modification time and table descriptor.
*/
static class TableDescriptorModtime {
private final HTableDescriptor descriptor;
private static class TableDescriptorAndModtime {
private final HTableDescriptor htd;
private final long modtime;
TableDescriptorModtime(final long modtime, final HTableDescriptor htd) {
this.descriptor = htd;
TableDescriptorAndModtime(final long modtime, final HTableDescriptor htd) {
this.htd = htd;
this.modtime = modtime;
}
@@ -101,30 +104,40 @@ public class FSTableDescriptors implements TableDescriptors {
}
HTableDescriptor getTableDescriptor() {
return this.descriptor;
return this.htd;
}
}
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
* conf and the filesystem where that root dir lives.
* This instance can do write operations (is not read only).
*/
public FSTableDescriptors(final Configuration conf) throws IOException {
this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
}
public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
this(fs, rootdir, false);
}
/**
* @param fs
* @param rootdir
* @param fsreadOnly True if we are read-only when it comes to filesystem
* @param fsreadonly True if we are read-only when it comes to filesystem
* operations; i.e. on remove, we do not do delete in fs.
*/
public FSTableDescriptors(final FileSystem fs, final Path rootdir,
final boolean fsreadOnly) {
public FSTableDescriptors(final FileSystem fs,
final Path rootdir, final boolean fsreadonly) {
super();
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadOnly;
this.fsreadonly = fsreadonly;
}
/* (non-Javadoc)
* @see org.apache.hadoop.hbase.TableDescriptors#getHTableDescriptor(java.lang.String)
/**
* Get the current table descriptor for the given table, or null if none exists.
*
* Uses a local cache of the descriptor but still checks the filesystem on each call
* to see if a newer file has been created since the cached one was read.
*/
@Override
public HTableDescriptor get(final byte [] tablename)
@@ -132,8 +145,11 @@ public class FSTableDescriptors implements TableDescriptors {
return get(Bytes.toString(tablename));
}
/* (non-Javadoc)
* @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptor(byte[])
/**
* Get the current table descriptor for the given table, or null if none exists.
*
* Uses a local cache of the descriptor but still checks the filesystem on each call
* to see if a newer file has been created since the cached one was read.
*/
@Override
public HTableDescriptor get(final String tablename)
@@ -150,23 +166,23 @@ public class FSTableDescriptors implements TableDescriptors {
// .META. and -ROOT- is already handled. If some one tries to get the descriptor for
// .logs, .oldlogs or .corrupt throw an exception.
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
throw new IOException("No descriptor found for table = " + tablename);
throw new IOException("No descriptor found for non table = " + tablename);
}
// Look in cache of descriptors.
TableDescriptorModtime cachedtdm = this.cache.get(tablename);
TableDescriptorAndModtime cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
// Check mod time has not changed (this is trip to NN).
if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) {
if (getTableInfoModtime(tablename) <= cachedtdm.getModtime()) {
cachehits++;
return cachedtdm.getTableDescriptor();
}
}
TableDescriptorModtime tdmt = null;
TableDescriptorAndModtime tdmt = null;
try {
tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename);
tdmt = getTableDescriptorAndModtime(tablename);
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, e);
@@ -185,8 +201,8 @@ public class FSTableDescriptors implements TableDescriptors {
return tdmt == null ? null : tdmt.getTableDescriptor();
}
/* (non-Javadoc)
* @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
/**
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, HTableDescriptor> getAll()
@@ -208,8 +224,15 @@ public class FSTableDescriptors implements TableDescriptors {
return htds;
}
/**
* Adds (or updates) the table descriptor to the FileSystem
* and updates the local cache with it.
*/
@Override
public void add(HTableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
}
if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
throw new NotImplementedException();
}
@@ -217,108 +240,179 @@ public class FSTableDescriptors implements TableDescriptors {
throw new NotImplementedException();
}
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
throw new NotImplementedException();
throw new NotImplementedException(
"Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
}
if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
updateTableDescriptor(htd);
long modtime = getTableInfoModtime(htd.getNameAsString());
this.cache.put(htd.getNameAsString(), new TableDescriptorAndModtime(modtime, htd));
}
/**
* Removes the table descriptor from the local cache and returns it.
* If not in read only mode, it also deletes the entire table directory(!)
* from the FileSystem.
*/
@Override
public HTableDescriptor remove(final String tablename)
throws IOException {
if (!this.fsreadonly) {
Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
}
Path tabledir = getTableDirectory(tablename);
if (this.fs.exists(tabledir)) {
if (!this.fs.delete(tabledir, true)) {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
}
TableDescriptorModtime tdm = this.cache.remove(tablename);
TableDescriptorAndModtime tdm = this.cache.remove(tablename);
return tdm == null ? null : tdm.getTableDescriptor();
}
/**
* Checks if <code>.tableinfo<code> exists for given table
* Checks if a current table info file exists for the given table
*
* @param fs file system
* @param rootdir root directory of HBase installation
* @param tableName name of table
* @return true if exists
* @throws IOException
*/
public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
String tableName) throws IOException {
FileStatus status = getTableInfoPath(fs, rootdir, tableName);
return status == null? false: fs.exists(status.getPath());
}
private static FileStatus getTableInfoPath(final FileSystem fs,
final Path rootdir, final String tableName)
throws IOException {
Path tabledir = FSUtils.getTablePath(rootdir, tableName);
return getTableInfoPath(fs, tabledir);
public boolean isTableInfoExists(String tableName) throws IOException {
return getTableInfoPath(tableName) != null;
}
/**
* Looks under the table directory in the filesystem for files with a
* {@link #TABLEINFO_NAME} prefix. Returns reference to the 'latest' instance.
* @param fs
* @param tabledir
* @return The 'current' tableinfo file.
* Find the most current table info file for the given table in the hbase root directory.
* @return The file status of the current table info file or null if it does not exist
*/
private FileStatus getTableInfoPath(final String tableName) throws IOException {
Path tableDir = getTableDirectory(tableName);
return getTableInfoPath(tableDir);
}
private FileStatus getTableInfoPath(Path tableDir)
throws IOException {
return getTableInfoPath(fs, tableDir, !fsreadonly);
}
/**
* Find the most current table info file for the table located in the given table directory.
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
public static FileStatus getTableInfoPath(final FileSystem fs,
final Path tabledir)
public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
throws IOException {
FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
@Override
public boolean accept(Path p) {
// Accept any file that starts with TABLEINFO_NAME
return p.getName().startsWith(TABLEINFO_NAME);
}
});
if (status == null || status.length < 1) return null;
Arrays.sort(status, new FileStatusFileNameComparator());
if (status.length > 1) {
// Clean away old versions of .tableinfo
for (int i = 1; i < status.length; i++) {
Path p = status[i].getPath();
// Clean up old versions
if (!fs.delete(p, false)) {
LOG.warn("Failed cleanup of " + p);
} else {
LOG.debug("Cleaned up old tableinfo file " + p);
}
}
}
return status[0];
return getTableInfoPath(fs, tableDir, false);
}
/**
* Compare {@link FileStatus} instances by {@link Path#getName()}.
* Returns in reverse order.
* Find the most current table info file for the table in the given table directory.
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if
* present or no sequence number at all if none exist (for backward compatibility from before
* there were sequence numbers).
* If there are multiple table info files found and removeOldFiles is true it also deletes the
* older files.
*
* @return The file status of the current table info file or null if none exist
* @throws IOException
*/
static class FileStatusFileNameComparator
implements Comparator<FileStatus> {
private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
throws IOException {
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
return getCurrentTableInfoStatus(fs, tableInfoDir, removeOldFiles);
}
/**
* Find the most current table info file in the given directory
*
* Looks within the given directory for any table info files
* and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
* If there are multiple possible files found
* and we're not in read only mode, it also deletes the older files.
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
// only visible for FSTableDescriptorMigrationToSubdir, can be removed with that
static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, boolean removeOldFiles)
throws IOException {
FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
if (status == null || status.length < 1) return null;
FileStatus mostCurrent = null;
for (FileStatus file : status) {
if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
mostCurrent = file;
}
}
if (removeOldFiles && status.length > 1) {
// Clean away old versions
for (FileStatus file : status) {
Path path = file.getPath();
if (file != mostCurrent) {
if (!fs.delete(file.getPath(), false)) {
LOG.warn("Failed cleanup of " + path);
} else {
LOG.debug("Cleaned up old tableinfo file " + path);
}
}
}
}
return mostCurrent;
}
/**
* Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
* reverse order.
*/
@VisibleForTesting
static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR =
new Comparator<FileStatus>() {
@Override
public int compare(FileStatus left, FileStatus right) {
return -left.compareTo(right);
}};
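Because the comparator negates FileStatus ordering (which compares by path), the lexicographically greatest file name -- i.e. the highest zero-padded sequence id -- sorts first, and getCurrentTableInfoStatus above keeps whichever file compares below the running mostCurrent. A worked illustration with invented paths:

  // Files in /hbase/myTable/.tabledesc:
  //   s2 = .tableinfo.0000000002
  //   s3 = .tableinfo.0000000003
  // s3.compareTo(s2) > 0, so TABLEINFO_FILESTATUS_COMPARATOR.compare(s3, s2) < 0
  // and s3 becomes 'current'. An old-style bare ".tableinfo" (no suffix) sorts
  // below any suffixed file, so it only wins when nothing newer exists.
  // With removeOldFiles=true, s2 would then be deleted.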
/**
* Return the table directory in HDFS
*/
@VisibleForTesting Path getTableDirectory(final String tableName) {
return getTableDirectory(rootdir, tableName);
}
/**
* Return the table directory in HDFS
*/
static Path getTableDirectory(Path rootDir, String tableName) {
return FSUtils.getTablePath(rootDir, tableName);
}
private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
@Override
public boolean accept(Path p) {
// Accept any file that starts with TABLEINFO_NAME
return p.getName().startsWith(TABLEINFO_FILE_PREFIX);
}};
/**
* Width of the sequenceid that is a suffix on a tableinfo file.
*/
static final int WIDTH_OF_SEQUENCE_ID = 10;
@VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10;
/*
* @param number Number to use as suffix.
* @return Returns zero-prefixed 5-byte wide decimal version of passed
* @return Returns zero-prefixed decimal version of passed
* number (Does absolute in case number is negative).
*/
static String formatTableInfoSequenceId(final int number) {
private static String formatTableInfoSequenceId(final int number) {
byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
int d = Math.abs(number);
for (int i = b.length - 1; i >= 0; i--) {
@@ -333,17 +427,16 @@ public class FSTableDescriptors implements TableDescriptors {
* Use regex because may encounter oldstyle .tableinfos where there is no
* sequenceid on the end.
*/
private static final Pattern SUFFIX =
Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
private static final Pattern TABLEINFO_FILE_REGEX =
Pattern.compile(TABLEINFO_FILE_PREFIX + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
/**
* @param p Path to a <code>.tableinfo</code> file.
* @return The current editid or 0 if none found.
*/
static int getTableInfoSequenceid(final Path p) {
@VisibleForTesting static int getTableInfoSequenceId(final Path p) {
if (p == null) return 0;
Matcher m = SUFFIX.matcher(p.getName());
Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
if (!m.matches()) throw new IllegalArgumentException(p.toString());
String suffix = m.group(2);
if (suffix == null || suffix.length() <= 0) return 0;
@@ -355,73 +448,70 @@ public class FSTableDescriptors implements TableDescriptors {
* @param sequenceid
* @return Name of tableinfo file.
*/
static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
return new Path(tabledir,
TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
@VisibleForTesting static String getTableInfoFileName(final int sequenceid) {
return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
}
/**
* @param fs
* @param rootdir
* @param tableName
* @return Modification time for the table {@link #TABLEINFO_NAME} file
* @return Modification time for the table {@link #TABLEINFO_FILE_PREFIX} file
* or <code>0</code> if no tableinfo file found.
* @throws IOException
*/
static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
final String tableName)
throws IOException {
FileStatus status = getTableInfoPath(fs, rootdir, tableName);
private long getTableInfoModtime(final String tableName) throws IOException {
FileStatus status = getTableInfoPath(tableName);
return status == null ? 0 : status.getModificationTime();
}
/**
* Get HTD from HDFS.
* @param fs
* @param hbaseRootDir
* @param tableName
* @return Descriptor or null if none found.
* @throws IOException
* Returns the latest table descriptor for the given table directly from the file system
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
public static HTableDescriptor getTableDescriptor(FileSystem fs,
Path hbaseRootDir, byte[] tableName)
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, String tableName) throws IOException {
Path tableDir = getTableDirectory(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
HTableDescriptor htd = null;
try {
TableDescriptorModtime tdmt =
getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName));
htd = tdmt == null ? null : tdmt.getTableDescriptor();
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ Bytes.toString(tableName), e);
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
}
return htd;
return readTableDescriptor(fs, status, false);
}
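These static getTableDescriptorFromFs overloads give tools such as CompactionTool, HBaseFsck, and the snapshot verifier a cache-free read path. A hedged usage sketch -- the table name and snapshot path are invented for illustration, but a completed snapshot dir stores its descriptor copy in the same .tabledesc layout, which is why both reads work:

  // By root dir plus table name:
  HTableDescriptor htd =
      FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, "myTable");
  // Or by any directory laid out like a table dir, e.g. a snapshot dir:
  HTableDescriptor snapHtd =
      FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);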
static HTableDescriptor getTableDescriptor(FileSystem fs,
Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName);
return tdmt == null ? null : tdmt.getTableDescriptor();
}
static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs,
Path hbaseRootDir, String tableName) throws NullPointerException, IOException{
private TableDescriptorAndModtime getTableDescriptorAndModtime(String tableName)
throws IOException {
// ignore both -ROOT- and .META. tables
if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
|| Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
return null;
}
return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
return getTableDescriptorAndModtime(getTableDirectory(tableName));
}
static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir)
throws NullPointerException, IOException {
if (tableDir == null) throw new NullPointerException();
FileStatus status = getTableInfoPath(fs, tableDir);
private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir)
throws IOException {
FileStatus status = getTableInfoPath(tableDir);
if (status == null) {
throw new TableInfoMissingException("No .tableinfo file under "
+ tableDir.toUri());
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
}
HTableDescriptor htd = readTableDescriptor(fs, status, !fsreadonly);
return new TableDescriptorAndModtime(status.getModificationTime(), htd);
}
private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
boolean rewritePb) throws IOException {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
FSDataInputStream fsDataInputStream = fs.open(status.getPath());
@@ -436,108 +526,131 @@ public class FSTableDescriptors implements TableDescriptors {
} catch (DeserializationException e) {
throw new IOException("content=" + Bytes.toShort(content), e);
}
if (!ProtobufUtil.isPBMagicPrefix(content)) {
if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
// Convert the file over to be pb before leaving here.
createTableDescriptor(fs, tableDir.getParent(), htd, true);
Path tableInfoDir = status.getPath().getParent();
Path tableDir = tableInfoDir.getParent();
writeTableDescriptor(fs, htd, tableDir, status);
}
return new TableDescriptorModtime(status.getModificationTime(), htd);
}
public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
throws IOException, NullPointerException {
TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir);
return tdmt == null ? null : tdmt.getTableDescriptor();
return htd;
}
/**
* Update table descriptor
* @param fs
* @param conf
* @param hTableDescriptor
* @return New tableinfo or null if we failed update.
* Update table descriptor on the file system
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor hTableDescriptor)
@VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd)
throws IOException {
Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir,
getTableInfoPath(fs, tableDir));
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
}
Path tableDir = getTableDirectory(htd.getNameAsString());
Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
if (p == null) throw new IOException("Failed update");
LOG.info("Updated tableinfo=" + p);
return p;
}
/**
* Deletes a table's directory from the file system if exists. Used in unit
* tests.
* Deletes all the table descriptor files from the file system.
* Used in unit tests only.
* @throws NotImplementedException if in read only mode
*/
public static void deleteTableDescriptorIfExists(String tableName,
Configuration conf) throws IOException {
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
// The below deleteDirectory works for either file or directory.
if (status != null && fs.exists(status.getPath())) {
FSUtils.deleteDirectory(fs, status.getPath());
public void deleteTableDescriptorIfExists(String tableName) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
}
Path tableDir = getTableDirectory(tableName);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
}
/**
* Deletes files matching the table info file pattern within the given directory
* whose sequenceId is at most the given max sequenceId.
*/
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
throws IOException {
FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
for (FileStatus file : status) {
Path path = file.getPath();
int sequenceId = getTableInfoSequenceId(path);
if (sequenceId <= maxSequenceId) {
boolean success = FSUtils.delete(fs, path, false);
if (success) {
LOG.debug("Deleted table descriptor at " + path);
} else {
LOG.error("Failed to delete descriptor at " + path);
}
}
}
}
/**
* @param fs
* @param hTableDescriptor
* @param tableDir
* @param status
* Attempts to write a new table descriptor to the given table's directory.
* It first writes it to the .tmp dir then uses an atomic rename to move it into place.
* It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
* not already in use.
* Removes the current descriptor file if passed in.
*
* @return Descriptor file or null if we failed write.
* @throws IOException
*/
private static Path writeTableDescriptor(final FileSystem fs,
final HTableDescriptor hTableDescriptor, final Path tableDir,
final FileStatus status)
final HTableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
Path tmpTableDir = new Path(tableDir, ".tmp");
// This directory is never removed to avoid removing it out from under a concurrent writer.
Path tmpTableDir = new Path(tableDir, TMP_DIR);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
// What is current sequenceid? We read the current sequenceid from
// the current file. After we read it, another thread could come in and
// compete with us writing out next version of file. The below retries
// should help in this case some but its hard to do guarantees in face of
// concurrent schema edits.
int currentSequenceid = status == null? 0: getTableInfoSequenceid(status.getPath());
int sequenceid = currentSequenceid;
int currentSequenceId = currentDescriptorFile == null ? 0 :
getTableInfoSequenceId(currentDescriptorFile.getPath());
int newSequenceId = currentSequenceId;
// Put arbitrary upperbound on how often we retry
int retries = 10;
int retrymax = currentSequenceid + retries;
Path tableInfoPath = null;
int retrymax = currentSequenceId + retries;
Path tableInfoDirPath = null;
do {
sequenceid += 1;
Path p = getTableInfoFileName(tmpTableDir, sequenceid);
if (fs.exists(p)) {
LOG.debug(p + " exists; retrying up to " + retries + " times");
newSequenceId += 1;
String filename = getTableInfoFileName(newSequenceId);
Path tempPath = new Path(tmpTableDir, filename);
if (fs.exists(tempPath)) {
LOG.debug(tempPath + " exists; retrying up to " + retries + " times");
continue;
}
tableInfoDirPath = new Path(tableInfoDir, filename);
try {
writeHTD(fs, p, hTableDescriptor);
tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
if (!fs.rename(p, tableInfoPath)) {
throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
writeHTD(fs, tempPath, htd);
fs.mkdirs(tableInfoDirPath.getParent());
if (!fs.rename(tempPath, tableInfoDirPath)) {
throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
}
LOG.debug("Wrote descriptor into: " + tableInfoDirPath);
} catch (IOException ioe) {
// Presume clash of names or something; go around again.
LOG.debug("Failed write and/or rename; retrying", ioe);
if (!FSUtils.deleteDirectory(fs, p)) {
LOG.warn("Failed cleanup of " + p);
if (!FSUtils.deleteDirectory(fs, tempPath)) {
LOG.warn("Failed cleanup of " + tempPath);
}
tableInfoPath = null;
tableInfoDirPath = null;
continue;
}
// Cleanup old schema file.
if (status != null) {
if (!FSUtils.deleteDirectory(fs, status.getPath())) {
LOG.warn("Failed delete of " + status.getPath() + "; continuing");
}
}
break;
} while (sequenceid < retrymax);
return tableInfoPath;
} while (newSequenceId < retrymax);
if (tableInfoDirPath != null) {
// if we succeeded, remove old table info files.
deleteTableDescriptorFiles(fs, tableInfoDir, newSequenceId - 1);
}
return tableInfoDirPath;
}
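writeTableDescriptor above follows the usual HDFS recipe for avoiding half-written files: write the complete file into a scratch directory, then publish it with a single rename, which is atomic within one filesystem. The core of the pattern, reduced to a sketch using the method's own names:

  Path tmp = new Path(tmpTableDir, filename);   // <table>/.tmp/.tableinfo.0000000004
  Path dst = new Path(tableInfoDir, filename);  // <table>/.tabledesc/.tableinfo.0000000004
  writeHTD(fs, tmp, htd);                       // readers never observe a partial file
  fs.mkdirs(dst.getParent());
  if (!fs.rename(tmp, dst)) {                   // atomic publish, or fail outright
    throw new IOException("Failed rename of " + tmp + " to " + dst);
  }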
private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
@@ -552,92 +665,59 @@ public class FSTableDescriptors implements TableDescriptors {
}
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param htableDescriptor
* @param conf
*/
public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
Configuration conf)
throws IOException {
return createTableDescriptor(htableDescriptor, conf, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param htableDescriptor
* @param conf
* @param forceCreation True if we are to overwrite existing file.
*/
static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
final Configuration conf, boolean forceCreation)
throws IOException {
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, forceCreation);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @param fs
* @param htableDescriptor
* @param rootdir
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor)
throws IOException {
return createTableDescriptor(fs, rootdir, htableDescriptor, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param fs
* @param htableDescriptor
* @param rootdir
* @param forceCreation
* @return True if we successfully created file.
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor, boolean forceCreation)
public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
return createTableDescriptor(htd, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
throws IOException {
Path tabledir = FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString());
return createTableDescriptorForTableDirectory(fs, tabledir, htableDescriptor, forceCreation);
Path tableDir = getTableDirectory(htd.getNameAsString());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
* Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table.
* @param fs filesystem where the descriptor should be written
* @param tabledir directory under which we should write the file
* @param htableDescriptor description of the table to write
* @param tableDir table directory under which we should write the file
* @param htd description of the table to write
* @param forceCreation if <tt>true</tt>,then even if previous table descriptor is present it will
* be overwritten
* @return <tt>true</tt> if the we successfully created the file, <tt>false</tt> if the file
* already exists and we weren't forcing the descriptor creation.
* @throws IOException if a filesystem error occurs
*/
public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tabledir,
HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException {
FileStatus status = getTableInfoPath(fs, tabledir);
public boolean createTableDescriptorForTableDirectory(Path tableDir,
HTableDescriptor htd, boolean forceCreation) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
}
FileStatus status = getTableInfoPath(fs, tableDir);
if (status != null) {
LOG.info("Current tableInfoPath = " + status.getPath());
LOG.debug("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
if (getTableDescriptor(fs, status.getPath().getParent()).equals(htableDescriptor)) {
LOG.info("TableInfo already exists.. Skipping creation");
if (readTableDescriptor(fs, status, false).equals(htd)) {
LOG.debug("TableInfo already exists.. Skipping creation");
return false;
}
}
}
}
Path p = writeTableDescriptor(fs, htableDescriptor, tabledir, status);
Path p = writeTableDescriptor(fs, htd, tableDir, status);
return p != null;
}
}
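createTableDescriptorForTableDirectory is now the single write entry point shared by table creation (pointed at a temp table dir, as in CreateTableHandler earlier in this commit) and snapshots (pointed at the snapshot working dir, as in TableInfoCopyTask). A hedged sketch of the snapshot-side call, with snapshotWorkingDir standing in for the real path:

  FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);
  // forceCreation=false: returns false rather than overwrite an equal descriptor
  boolean wrote =
      fstd.createTableDescriptorForTableDirectory(snapshotWorkingDir, htd, false);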

View File

@@ -875,7 +875,7 @@ public abstract class FSUtils {
}
/**
* Checks if root region exists
* Checks if meta region exists
*
* @param fs file system
* @param rootdir root directory of HBase installation
@@ -885,9 +885,9 @@
@SuppressWarnings("deprecation")
public static boolean metaRegionExists(FileSystem fs, Path rootdir)
throws IOException {
Path rootRegionDir =
Path metaRegionDir =
HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
return fs.exists(rootRegionDir);
return fs.exists(metaRegionDir);
}
/**

View File

@@ -804,7 +804,7 @@ public class HBaseFsck extends Configured implements Tool {
tablesInfo.put(tableName, modTInfo);
try {
HTableDescriptor htd =
FSTableDescriptors.getTableDescriptor(hbaseRoot.getFileSystem(getConf()),
FSTableDescriptors.getTableDescriptorFromFs(hbaseRoot.getFileSystem(getConf()),
hbaseRoot, tableName);
modTInfo.htds.add(htd);
} catch (IOException ioe) {
@@ -849,16 +849,16 @@
* 1. the correct tablename <br>
* 2. the correct colfamily list<br>
* 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @param tableName
* @throws IOException
*/
private boolean fabricateTableInfo(String tableName, Set<String> columns) throws IOException {
private boolean fabricateTableInfo(FSTableDescriptors fstd, String tableName,
Set<String> columns) throws IOException {
if (columns ==null || columns.isEmpty()) return false;
HTableDescriptor htd = new HTableDescriptor(tableName);
for (String columnfamimly : columns) {
htd.addFamily(new HColumnDescriptor(columnfamimly));
}
FSTableDescriptors.createTableDescriptor(htd, getConf(), true);
fstd.createTableDescriptor(htd, true);
return true;
}
@@ -889,13 +889,13 @@ public class HBaseFsck extends Configured implements Tool {
public void fixOrphanTables() throws IOException {
if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
Path hbaseRoot = FSUtils.getRootDir(getConf());
List<String> tmpList = new ArrayList<String>();
tmpList.addAll(orphanTableDirs.keySet());
HTableDescriptor[] htds = getHTableDescriptors(tmpList);
Iterator<Entry<String, Set<String>>> iter = orphanTableDirs.entrySet().iterator();
int j = 0;
int numFailedCase = 0;
FSTableDescriptors fstd = new FSTableDescriptors(getConf());
while (iter.hasNext()) {
Entry<String, Set<String>> entry = (Entry<String, Set<String>>) iter.next();
String tableName = entry.getKey();
@@ -904,13 +904,12 @@ public class HBaseFsck extends Configured implements Tool {
if (tableName.equals(Bytes.toString(htds[j].getName()))) {
HTableDescriptor htd = htds[j];
LOG.info("fixing orphan table: " + tableName + " from cache");
FSTableDescriptors.createTableDescriptor(
hbaseRoot.getFileSystem(getConf()), hbaseRoot, htd, true);
fstd.createTableDescriptor(htd, true);
j++;
iter.remove();
}
} else {
if (fabricateTableInfo(tableName, entry.getValue())) {
if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
iter.remove();

View File

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.catalog.MetaEditor;
@@ -43,12 +42,9 @@ import org.apache.hadoop.hbase.client.HConnectable;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@@ -153,7 +149,7 @@ class HMerge {
this.rootDir = FSUtils.getRootDir(conf);
Path tabledir = HTableDescriptor.getTableDir(this.rootDir, tableName);
this.htd = FSTableDescriptors.getTableDescriptor(this.fs, tabledir);
this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);

View File

@@ -61,7 +61,6 @@ public class Merge extends Configured implements Tool {
private byte [] tableName; // Name of table
private volatile byte [] region1; // Name of region 1
private volatile byte [] region2; // Name of region 2
private volatile boolean isMetaTable;
private volatile HRegionInfo mergeInfo;
/** default constructor */
@@ -153,8 +152,8 @@ public class Merge extends Configured implements Tool {
if (info2 == null) {
throw new NullPointerException("info2 is null using key " + meta);
}
HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
this.rootdir, this.tableName);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
this.rootdir, Bytes.toString(this.tableName));
HRegion merged = merge(htd, meta, info1, info2);
LOG.info("Adding " + merged.getRegionInfo() + " to " +
@@ -246,7 +245,6 @@ public class Merge extends Configured implements Tool {
return -1;
}
tableName = Bytes.toBytes(remainingArgs[0]);
isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0;
region1 = Bytes.toBytesBinary(remainingArgs[1]);
region2 = Bytes.toBytesBinary(remainingArgs[2]);

View File

@@ -39,10 +39,10 @@ public class TestFSTableDescriptorForceCreation {
final String name = "newTable2";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name);
assertTrue("Should create new table descriptor",
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@Test
@@ -50,13 +50,12 @@
throws IOException {
final String name = "testAlreadyExists";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any detrius laying around.
// Cleanup old tests if any detritus laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name);
htds.add(htd);
assertFalse("Should not create new table descriptor",
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
fstd.add(htd);
assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@Test
@@ -65,10 +64,11 @@
final String name = "createNewTableNew2";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name);
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false);
fstd.createTableDescriptor(htd, false);
assertTrue("Should create new table descriptor",
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true));
fstd.createTableDescriptor(htd, true));
}
}

View File

@@ -193,8 +193,9 @@ public class TestMasterFailover {
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = FSUtils.getRootDir(conf);
FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
fstd.createTableDescriptor(htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
createRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -206,7 +207,7 @@
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
fstd.createTableDescriptor(htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
createRegion(hriDisabled, rootdir, conf, htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
@@ -495,8 +496,9 @@
htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = FSUtils.getRootDir(conf);
FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
fstd.createTableDescriptor(htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
null, null);
createRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -508,7 +510,7 @@
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
fstd.createTableDescriptor(htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
createRegion(hriDisabled, rootdir, conf, htdDisabled);

View File

@@ -144,7 +144,7 @@ public class TestTableDescriptorModification {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName);
htd = FSTableDescriptors.getTableDescriptor(mfs.getFileSystem(), tableDir);
htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(htd, tableName, families);
}

View File

@@ -193,9 +193,9 @@ public class SnapshotTestingUtils {
HConstants.HREGION_LOGDIR_NAME));
}
// check the table info
HTableDescriptor desc = FSTableDescriptors.getTableDescriptor(fs, rootDir,
tableName);
HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptor(fs,
HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
Bytes.toString(tableName));
HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs,
snapshotDir);
assertEquals(desc, snapshotDesc);

View File

@@ -359,9 +359,10 @@ public class TestFlushSnapshotFromClient {
assertTrue(fs.exists(snapshotinfo));
// check the table info
HTableDescriptor desc = FSTableDescriptors.getTableDescriptor(fs, rootDir, TABLE_NAME);
HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptor(fs,
SnapshotDescriptionUtils.getSnapshotsDir(rootDir), Bytes.toBytes(snapshotName));
HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs,
rootDir,STRING_TABLE_NAME);
HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs,
SnapshotDescriptionUtils.getSnapshotsDir(rootDir), snapshotName);
assertEquals(desc, snapshotDesc);
// check the region snapshot for all the regions

View File

@@ -126,7 +126,7 @@ public class TestRestoreSnapshotHelper {
LOG.debug("pre-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
FSTableDescriptors.createTableDescriptor(htdClone, conf);
new FSTableDescriptors(conf).createTableDescriptor(htdClone);
RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone);
helper.restoreHdfsRegions();

View File

@@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -55,12 +56,12 @@ public class TestFSTableDescriptors {
@Test (expected=IllegalArgumentException.class)
public void testRegexAgainstOldStyleTableInfo() {
Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME);
int i = FSTableDescriptors.getTableInfoSequenceid(p);
Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX);
int i = FSTableDescriptors.getTableInfoSequenceId(p);
assertEquals(0, i);
// Assert it won't eat garbage -- that it fails
p = new Path("/tmp", "abc");
FSTableDescriptors.getTableInfoSequenceid(p);
FSTableDescriptors.getTableInfoSequenceId(p);
}
@Test
@@ -68,12 +69,13 @@ public class TestFSTableDescriptors {
Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
HTableDescriptor htd = new HTableDescriptor("testCreate");
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
FileStatus [] statuses = fs.listStatus(testdir);
assertTrue("statuses.length="+statuses.length, statuses.length == 1);
for (int i = 0; i < 10; i++) {
FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
fstd.updateTableDescriptor(htd);
}
statuses = fs.listStatus(testdir);
assertTrue(statuses.length == 1);
@@ -83,53 +85,53 @@ public class TestFSTableDescriptors {
}
@Test
public void testSequenceidAdvancesOnTableInfo() throws IOException {
public void testSequenceIdAdvancesOnTableInfo() throws IOException {
Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
Path p0 = fstd.updateTableDescriptor(htd);
int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
Path p1 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p0));
int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
assertTrue(i1 == i0 + 1);
Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
Path p2 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p1));
int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
assertTrue(i2 == i1 + 1);
}
@Test
public void testFormatTableInfoSequenceId() {
Path p0 = assertWriteAndReadSequenceid(0);
Path p0 = assertWriteAndReadSequenceId(0);
// Assert p0 has format we expect.
StringBuilder sb = new StringBuilder();
for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
sb.append("0");
}
assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(),
assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(),
p0.getName());
// Check a few more.
Path p2 = assertWriteAndReadSequenceid(2);
Path p10000 = assertWriteAndReadSequenceid(10000);
Path p2 = assertWriteAndReadSequenceId(2);
Path p10000 = assertWriteAndReadSequenceId(10000);
// Get a .tableinfo that has no sequenceid suffix.
Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX);
FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
FSTableDescriptors.FileStatusFileNameComparator comparator =
new FSTableDescriptors.FileStatusFileNameComparator();
Comparator<FileStatus> comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
assertTrue(comparator.compare(fs, fs0) > 0);
assertTrue(comparator.compare(fs0, fs2) > 0);
assertTrue(comparator.compare(fs2, fs10000) > 0);
}
private Path assertWriteAndReadSequenceid(final int i) {
Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i);
int ii = FSTableDescriptors.getTableInfoSequenceid(p);
private Path assertWriteAndReadSequenceId(final int i) {
Path p = new Path("/tmp", FSTableDescriptors.getTableInfoFileName(i));
int ii = FSTableDescriptors.getTableInfoSequenceId(p);
assertEquals(i, ii);
return p;
}
@@ -152,30 +154,19 @@ public class TestFSTableDescriptors {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(name);
Path rootdir = UTIL.getDataTestDir(name);
createHTDInFS(fs, rootdir, htd);
FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
fstd.createTableDescriptor(htd);
HTableDescriptor htd2 =
FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getNameAsString());
assertTrue(htd.equals(htd2));
}
private void createHTDInFS(final FileSystem fs, Path rootdir,
final HTableDescriptor htd)
throws IOException {
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
}
@Test public void testHTableDescriptors()
throws IOException, InterruptedException {
final String name = "testHTableDescriptors";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any debris laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
createHTDInFS(fs, rootdir, htd);
}
FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
@Override
public HTableDescriptor get(byte[] tablename)
@@ -184,6 +175,13 @@ public class TestFSTableDescriptors {
return super.get(tablename);
}
};
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htds.createTableDescriptor(htd);
}
for (int i = 0; i < count; i++) {
assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
}
@@ -194,7 +192,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htd.addFamily(new HColumnDescriptor("" + i));
FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
htds.updateTableDescriptor(htd);
}
// Wait a while so mod time we write is for sure different.
Thread.sleep(100);
@@ -240,7 +238,8 @@ public class TestFSTableDescriptors {
@Test
public void testTableInfoFileStatusComparator() {
FileStatus bare =
new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
new FileStatus(0, false, 0, 0, -1,
new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX));
FileStatus future =
new FileStatus(0, false, 0, 0, -1,
new Path("/tmp/tablinfo." + System.currentTimeMillis()));
@@ -250,8 +249,7 @@ public class TestFSTableDescriptors {
FileStatus [] alist = {bare, future, farFuture};
FileStatus [] blist = {bare, farFuture, future};
FileStatus [] clist = {farFuture, bare, future};
FSTableDescriptors.FileStatusFileNameComparator c =
new FSTableDescriptors.FileStatusFileNameComparator();
Comparator<FileStatus> c = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
Arrays.sort(alist, c);
Arrays.sort(blist, c);
Arrays.sort(clist, c);
@@ -282,16 +280,17 @@ public class TestFSTableDescriptors {
HTableDescriptor htd = new HTableDescriptor(
"testCreateTableDescriptorUpdatesIfThereExistsAlready");
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); //this will re-create
Path tableDir = FSUtils.getTablePath(testdir, htd.getName());
Path tmpTableDir = new Path(tableDir, ".tmp");
assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
Path tableDir = fstd.getTableDirectory(htd.getNameAsString());
Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
FileStatus[] statuses = fs.listStatus(tmpTableDir);
assertTrue(statuses.length == 0);
assertEquals(htd, FSTableDescriptors.getTableDescriptor(fs, tableDir));
assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
}

View File

@@ -1099,8 +1099,6 @@ public class TestHBaseFsck {
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
assertTrue(cluster.waitForActiveAndReadyMaster());
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = FSUtils.getRootDir(conf);
byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
@@ -1108,8 +1106,8 @@ public class TestHBaseFsck {
htdDisabled.addFamily(new HColumnDescriptor(FAM));
// Write the .tableinfo
FSTableDescriptors
.createTableDescriptor(filesystem, rootdir, htdDisabled);
FSTableDescriptors fstd = new FSTableDescriptors(conf);
fstd.createTableDescriptor(htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);

View File

@@ -95,7 +95,7 @@ public class TestMergeTable {
// Create regions and populate them at same time. Create the tabledir
// for them first.
FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
HRegion [] regions = {
createRegion(desc, null, row_70001, 1, 70000, rootdir),
createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),

View File

@@ -139,7 +139,7 @@ public class TestMergeTool extends HBaseTestCase {
try {
// Create meta region
createMetaRegion();
FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc);
new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(this.desc);
/*
* Create the regions we will merge
*/