HBASE-12010 Use TableName.META_TABLE_NAME instead of indirectly from HTableDescriptor

Conflicts:
	hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
This commit is contained in:
stack 2014-09-19 00:05:50 -07:00
parent 60287ed983
commit 3a5e580010
7 changed files with 45 additions and 44 deletions

View File

@@ -328,7 +328,7 @@ public class MetaTableAccessor {
public static boolean tableExists(HConnection hConnection,
final TableName tableName)
throws IOException {
if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
if (tableName.equals(TableName.META_TABLE_NAME)) {
// Catalog tables always exist.
return true;
}

View File

@@ -55,7 +55,7 @@ import com.google.common.primitives.Ints;
* passed filesystem. It expects descriptors to be in a file in the
* {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only
* -- i.e. does not modify the filesystem or can be read and write.
*
*
* <p>Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept in the {@link #TABLEINFO_DIR} subdir
* of the table directory in the filesystem.
@@ -117,7 +117,7 @@ public class FSTableDescriptors implements TableDescriptors {
public FSTableDescriptors(final Configuration conf) throws IOException {
this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
}
public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
this(fs, rootdir, false);
}
@@ -136,7 +136,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Get the current table descriptor for the given table, or null if none exists.
*
*
* Uses a local cache of the descriptor but still checks the filesystem on each call
* to see if a newer file has been created since the cached one was read.
*/
@@ -144,7 +144,7 @@ public class FSTableDescriptors implements TableDescriptors {
public HTableDescriptor get(final TableName tablename)
throws IOException {
invocations++;
if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) {
if (TableName.META_TABLE_NAME.equals(tablename)) {
cachehits++;
return HTableDescriptor.META_TABLEDESC;
}
@@ -164,7 +164,7 @@ public class FSTableDescriptors implements TableDescriptors {
return cachedtdm.getTableDescriptor();
}
}
TableDescriptorAndModtime tdmt = null;
try {
tdmt = getTableDescriptorAndModtime(tablename);
@@ -175,7 +175,7 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, ioe);
}
if (tdmt != null) {
this.cache.put(tablename, tdmt);
}
@@ -271,7 +271,7 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Checks if a current table info file exists for the given table
*
*
* @param tableName name of table
* @return true if exists
* @throws IOException
@@ -279,7 +279,7 @@ public class FSTableDescriptors implements TableDescriptors {
public boolean isTableInfoExists(TableName tableName) throws IOException {
return getTableInfoPath(tableName) != null;
}
/**
* Find the most current table info file for the given table in the hbase root directory.
* @return The file status of the current table info file or null if it does not exist
@@ -293,15 +293,15 @@ public class FSTableDescriptors implements TableDescriptors {
throws IOException {
return getTableInfoPath(fs, tableDir, !fsreadonly);
}
/**
* Find the most current table info file for the table located in the given table directory.
*
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
*
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
@@ -309,17 +309,17 @@ public class FSTableDescriptors implements TableDescriptors {
throws IOException {
return getTableInfoPath(fs, tableDir, false);
}
/**
* Find the most current table info file for the table in the given table directory.
*
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if
* present or no sequence number at all if none exist (for backward compatibility from before
* there were sequence numbers).
* If there are multiple table info files found and removeOldFiles is true it also deletes the
* older files.
*
*
* @return The file status of the current table info file or null if none exist
* @throws IOException
*/
@@ -328,17 +328,17 @@ public class FSTableDescriptors implements TableDescriptors {
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
return getCurrentTableInfoStatus(fs, tableInfoDir, removeOldFiles);
}
/**
* Find the most current table info file in the given directory
*
*
* Looks within the given directory for any table info files
* and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
* If there are multiple possible files found
* and the we're not in read only mode it also deletes the older files.
*
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
@@ -368,7 +368,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
return mostCurrent;
}
/**
* Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
* reverse order.
@@ -393,7 +393,7 @@ public class FSTableDescriptors implements TableDescriptors {
public boolean accept(Path p) {
// Accept any file that starts with TABLEINFO_NAME
return p.getName().startsWith(TABLEINFO_FILE_PREFIX);
}};
}};
/**
* Width of the sequenceid that is a suffix on a tableinfo file.
@@ -482,7 +482,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
return readTableDescriptor(fs, status, false);
}
/**
* @param tableName table name
* @return TableDescriptorAndModtime or null if no table descriptor was found
@@ -537,7 +537,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
return htd;
}
/**
* Update table descriptor on the file system
* @throws IOException Thrown if failed update.
@@ -564,14 +564,14 @@ public class FSTableDescriptors implements TableDescriptors {
if (fsreadonly) {
throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
}
Path tableDir = getTableDir(tableName);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
}
/**
* Deletes files matching the table info file pattern within the given directory
* Deletes files matching the table info file pattern within the given directory
* whose sequenceId is at most the given max sequenceId.
*/
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
@@ -590,25 +590,25 @@ public class FSTableDescriptors implements TableDescriptors {
}
}
}
/**
* Attempts to write a new table descriptor to the given table's directory.
* It first writes it to the .tmp dir then uses an atomic rename to move it into place.
* It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
* not already in use.
* Removes the current descriptor file if passed in.
*
*
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs,
private static Path writeTableDescriptor(final FileSystem fs,
final HTableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
// This directory is never removed to avoid removing it out from under a concurrent writer.
Path tmpTableDir = new Path(tableDir, TMP_DIR);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
// What is current sequenceid? We read the current sequenceid from
// the current file. After we read it, another thread could come in and
// compete with us writing out next version of file. The below retries
@@ -617,7 +617,7 @@ public class FSTableDescriptors implements TableDescriptors {
int currentSequenceId = currentDescriptorFile == null ? 0 :
getTableInfoSequenceId(currentDescriptorFile.getPath());
int newSequenceId = currentSequenceId;
// Put arbitrary upperbound on how often we retry
int retries = 10;
int retrymax = currentSequenceId + retries;
@@ -655,7 +655,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
return tableInfoDirPath;
}
private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
@@ -681,7 +681,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
@@ -689,7 +689,7 @@ public class FSTableDescriptors implements TableDescriptors {
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
/**
* Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table.
@@ -721,6 +721,6 @@ public class FSTableDescriptors implements TableDescriptors {
Path p = writeTableDescriptor(fs, htd, tableDir, status);
return p != null;
}
}

View File

@@ -1159,7 +1159,7 @@ public class TestAdmin {
}
assertTrue("Unexcepted exception message " + msg, msg != null &&
msg.startsWith(TableExistsException.class.getName()) &&
msg.contains(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString()));
msg.contains(TableName.META_TABLE_NAME.getNameAsString()));
// Now try and do concurrent creation with a bunch of threads.
final HTableDescriptor threadDesc =

View File

@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -58,7 +59,7 @@ public class TestLogRollingNoCluster {
// The implementation needs to know the 'handler' count.
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, THREAD_COUNT);
HLog wal = HLogFactory.createHLog(fs, dir, "logs", TEST_UTIL.getConfiguration());
Appender [] appenders = null;
final int count = THREAD_COUNT;
@@ -125,7 +126,7 @@ public class TestLogRollingNoCluster {
byte[] bytes = Bytes.toBytes(i);
edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
this.wal.append(HRegionInfo.FIRST_META_REGIONINFO,
HTableDescriptor.META_TABLEDESC.getTableName(),
TableName.META_TABLE_NAME,
edit, now, HTableDescriptor.META_TABLEDESC, sequenceId);
}
String msg = getName() + " finished";

View File

@@ -64,7 +64,7 @@ public class TestReplicationWALEntryFilters {
assertNull(filter.filter(metaEntry));
// ns table
HLogKey key2 = new HLogKey(new byte[] {}, HTableDescriptor.NAMESPACE_TABLEDESC.getTableName());
HLogKey key2 = new HLogKey(new byte[] {}, TableName.NAMESPACE_TABLE_NAME);
HLog.Entry nsEntry = new Entry(key2, null);
assertNull(filter.filter(nsEntry));

View File

@@ -160,7 +160,7 @@ public class TestHBaseFsck {
// Now let's mess it up and change the assignment in hbase:meta to
// point to a different region server
Table meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(),
Table meta = new HTable(conf, TableName.META_TABLE_NAME,
executorService);
Scan scan = new Scan();
scan.setStartRow(Bytes.toBytes(table+",,"));
@@ -1310,7 +1310,7 @@ public class TestHBaseFsck {
Bytes.toBytes("C"), true, true, false);
// Create a new meta entry to fake it as a split parent.
meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(),
meta = new HTable(conf, TableName.META_TABLE_NAME,
executorService);
HRegionInfo hri = location.getRegionInfo();
@@ -1385,7 +1385,7 @@ public class TestHBaseFsck {
TEST_UTIL.getHBaseAdmin().flush(table);
HRegionLocation location = tbl.getRegionLocation("B");
meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = location.getRegionInfo();
// do a regular split
@@ -1435,7 +1435,7 @@ public class TestHBaseFsck {
TEST_UTIL.getHBaseAdmin().flush(table);
HRegionLocation location = tbl.getRegionLocation("B");
meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = location.getRegionInfo();
// do a regular split
@@ -1978,7 +1978,7 @@ public class TestHBaseFsck {
// Mess it up by removing the RegionInfo for one region.
final List<Delete> deletes = new LinkedList<Delete>();
Table meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
Table meta = new HTable(conf, TableName.META_TABLE_NAME);
MetaScanner.metaScan(conf, new MetaScanner.MetaScannerVisitor() {
@Override

View File

@@ -278,7 +278,7 @@ public class OfflineMetaRebuildTestCore {
*/
protected int scanMeta() throws IOException {
int count = 0;
HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
ResultScanner scanner = meta.getScanner(new Scan());
LOG.info("Table: " + Bytes.toString(meta.getTableName()));
for (Result res : scanner) {