HBASE-12219 Cache more efficiently getAll() and get() in FSTableDescriptors

Signed-off-by: stack <stack@apache.org>
Esteban Gutierrez 2014-10-28 14:18:07 -07:00 committed by stack
parent a71dd16a70
commit ba7344f5d1
7 changed files with 276 additions and 134 deletions
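
At its core the patch replaces the modtime-validated TableDescriptorAndModtime cache with a plain descriptor cache guarded by a volatile usecache flag: get() consults the cache first, falls through to the filesystem on a miss, and repopulates the cache only while caching is enabled. A self-contained analogue of that pattern follows; the names DescriptorSource, CachingDescriptorSource and load() are illustrative, not HBase classes.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

interface DescriptorSource {
  String get(String table) throws IOException;
  void setCacheOn() throws IOException;   // mirrors TableDescriptors.setCacheOn()
  void setCacheOff() throws IOException;  // mirrors TableDescriptors.setCacheOff()
}

class CachingDescriptorSource implements DescriptorSource {
  private final Map<String, String> cache = new ConcurrentHashMap<String, String>();
  private volatile boolean usecache;

  @Override public void setCacheOn() { cache.clear(); usecache = true; }   // clear stale entries, then enable
  @Override public void setCacheOff() { usecache = false; cache.clear(); } // disable first, then drop entries

  @Override public String get(String table) throws IOException {
    if (usecache) {
      String cached = cache.get(table);
      if (cached != null) {
        return cached;                     // cache hit, no filesystem access
      }
    }
    String loaded = load(table);           // read-through on a miss (or when caching is off)
    if (usecache && loaded != null) {
      cache.put(table, loaded);            // last descriptor written wins
    }
    return loaded;
  }

  private String load(String table) throws IOException {
    return "descriptor-of-" + table;       // stand-in for reading the tableinfo file
  }
}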

TableDescriptors.java

@ -93,4 +93,14 @@ public interface TableDescriptors {
*/
HTableDescriptor remove(final TableName tablename)
throws IOException;
/**
* Enables the tabledescriptor cache
*/
void setCacheOn() throws IOException;
/**
* Disables the tabledescriptor cache
*/
void setCacheOff() throws IOException;
}
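
The two methods added to TableDescriptors let callers toggle descriptor caching at runtime. A minimal usage sketch, assuming any TableDescriptors implementation; the helper class and method below are illustrative, only the interface calls come from the patch.

import java.io.IOException;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

final class DescriptorCacheToggleExample {
  static void warmAndQuery(TableDescriptors tds, TableName table) throws IOException {
    tds.setCacheOn();    // enable caching; FSTableDescriptors clears old entries first
    tds.getAll();        // one bulk load populates the cache
    tds.get(table);      // subsequent lookups can be answered from memory
    tds.setCacheOff();   // disable caching and drop the cached descriptors
  }
}

Implementations that never cache can satisfy the contract with empty overrides, as the TableDescriptors stub in TestCatalogJanitor does later in this commit.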

HMaster.java

@ -215,6 +215,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
MasterCoprocessorHost cpHost;
private final boolean preLoadTableDescriptors;
// Time stamps for when a hmaster became active
private long masterActiveTime;
@ -290,6 +292,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));
// preload table descriptor at startup
this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);
// Do we publish the status?
boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
@ -517,6 +522,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
this.fileSystemManager = new MasterFileSystem(this, this);
// enable table descriptors cache
this.tableDescriptors.setCacheOn();
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
status.setStatus("Pre-loading table descriptors");
this.tableDescriptors.getAll();
}
// publish cluster ID
status.setStatus("Publishing Cluster ID in ZooKeeper");
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
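
The master now switches the cache on unconditionally and, when hbase.master.preload.tabledescriptors is true (the default in the hunk above), pays one getAll() scan during initialization so later lookups are served from memory. A minimal sketch of that gate; the surrounding helper class is illustrative, only the configuration key, default, and calls come from the patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableDescriptors;

final class DescriptorPreloadSketch {
  static void enableAndMaybePreload(Configuration conf, TableDescriptors tds) throws IOException {
    tds.setCacheOn();  // always cache on the master
    if (conf.getBoolean("hbase.master.preload.tabledescriptors", true)) {
      tds.getAll();    // one filesystem sweep at startup instead of on first use
    }
  }
}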

CreateTableHandler.java

@ -229,9 +229,12 @@ public class CreateTableHandler extends EventHandler {
ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
}
// 6. Enable table
// 8. Enable table
assignmentManager.getTableStateManager().setTableState(tableName,
TableState.State.ENABLED);
// 9. Update the tabledescriptor cache.
((HMaster) this.server).getTableDescriptors().get(tableName);
}
/**

HRegionServer.java

@ -517,7 +517,7 @@ public class HRegionServer extends HasThread implements
this.fs = new HFileSystem(this.conf, useHBaseChecksum);
this.rootDir = FSUtils.getRootDir(this.conf);
this.tableDescriptors = new FSTableDescriptors(this.conf,
this.fs, this.rootDir, !canUpdateTableDescriptor());
this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
service = new ExecutorService(getServerName().toShortString());
spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
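
The region server passes usecache=false to the new five-argument constructor, while HMaster (a subclass of HRegionServer in this branch) flips the cache on afterwards with setCacheOn(). A hypothetical side-by-side of the two construction paths; the helper class, method names, and fsreadonly choices are illustrative, the constructor arguments mirror the hunks.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

final class FsTableDescriptorsWiring {
  static FSTableDescriptors forMaster(Configuration conf, FileSystem fs, Path rootDir)
      throws IOException {
    FSTableDescriptors tds =
        new FSTableDescriptors(conf, fs, rootDir, /* fsreadonly */ false, /* usecache */ false);
    tds.setCacheOn();  // the master enables the cache during initialization
    return tds;
  }

  static FSTableDescriptors forRegionServer(Configuration conf, FileSystem fs, Path rootDir,
      boolean canUpdateTableDescriptor) throws IOException {
    // Region servers leave the cache off and are usually read-only on the descriptor files.
    return new FSTableDescriptors(conf, fs, rootDir, !canUpdateTableDescriptor, false);
  }
}

Keeping the cache off on region servers is presumably deliberate: they consult descriptors rarely and have no way to invalidate entries rewritten by the master.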

FSTableDescriptors.java

@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
* passed filesystem. It expects descriptors to be in a file in the
* {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only
* -- i.e. does not modify the filesystem or can be read and write.
*
*
* <p>Also has utility for keeping up the table descriptors tableinfo file.
* The table schema file is kept in the {@link #TABLEINFO_DIR} subdir
* of the table directory in the filesystem.
@ -78,6 +78,9 @@ public class FSTableDescriptors implements TableDescriptors {
private final FileSystem fs;
private final Path rootdir;
private final boolean fsreadonly;
private volatile boolean usecache;
private volatile boolean fsvisited;
@VisibleForTesting long cachehits = 0;
@VisibleForTesting long invocations = 0;
@ -89,43 +92,14 @@ public class FSTableDescriptors implements TableDescriptors {
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
private final Map<TableName, TableDescriptorAndModtime> cache =
new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
private final Map<TableName, TableDescriptor> cache =
new ConcurrentHashMap<TableName, TableDescriptor>();
/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
private final HTableDescriptor metaTableDescritor;
/**
* Data structure to hold modification time and table descriptor.
*/
private static class TableDescriptorAndModtime {
private final TableDescriptor td;
private final long modtime;
TableDescriptorAndModtime(final long modtime, final TableDescriptor td) {
this.td = td;
this.modtime = modtime;
}
long getModtime() {
return this.modtime;
}
TableDescriptor getTableDescriptor() {
return this.td;
}
HTableDescriptor getHTableDescriptor() {
return this.td.getHTableDescriptor();
}
TableState.State getTableState() {
return this.td.getTableState();
}
}
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
* conf and the filesystem where that root dir lives.
@ -134,10 +108,10 @@ public class FSTableDescriptors implements TableDescriptors {
public FSTableDescriptors(final Configuration conf) throws IOException {
this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
}
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
throws IOException {
this(conf, fs, rootdir, false);
throws IOException {
this(conf, fs, rootdir, false, true);
}
/**
@ -145,18 +119,34 @@ public class FSTableDescriptors implements TableDescriptors {
* operations; i.e. on remove, we do not do delete in fs.
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly) throws IOException {
final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
super();
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadonly;
this.usecache = usecache;
this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
}
public void setCacheOn() throws IOException {
this.cache.clear();
this.usecache = true;
}
public void setCacheOff() throws IOException {
this.usecache = false;
this.cache.clear();
}
@VisibleForTesting
public boolean isUsecache() {
return this.usecache;
}
/**
* Get the current table descriptor for the given table, or null if none exists.
*
*
* Uses a local cache of the descriptor but still checks the filesystem on each call
* to see if a newer file has been created since the cached one was read.
*/
@ -175,20 +165,17 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("No descriptor found for non table = " + tablename);
}
// Look in cache of descriptors.
TableDescriptorAndModtime cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
// Check mod time has not changed (this is trip to NN).
if (getTableInfoModtime(tablename) <= cachedtdm.getModtime()) {
if (usecache) {
// Look in cache of descriptors.
TableDescriptor cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
cachehits++;
return cachedtdm.getTableDescriptor();
return cachedtdm;
}
}
TableDescriptorAndModtime tdmt = null;
TableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorAndModtime(tablename);
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename, !fsreadonly);
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, e);
@ -196,11 +183,12 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Exception during readTableDecriptor. Current table name = "
+ tablename, ioe);
}
if (tdmt != null) {
// last HTD written wins
if (usecache && tdmt != null) {
this.cache.put(tablename, tdmt);
}
return tdmt == null ? null : tdmt.getTableDescriptor();
return tdmt;
}
/**
@ -226,17 +214,33 @@ public class FSTableDescriptors implements TableDescriptors {
public Map<String, TableDescriptor> getAllDescriptors()
throws IOException {
Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
for (Path d: tableDirs) {
TableDescriptor htd = null;
try {
htd = getDescriptor(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
// inability of retrieving one HTD shouldn't stop getting the remaining
LOG.warn("Trouble retrieving htd", fnfe);
if (fsvisited && usecache) {
for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().toString(), entry.getValue());
}
// add hbase:meta to the response
tds.put(this.metaTableDescritor.getNameAsString(),
new TableDescriptor(metaTableDescritor, TableState.State.ENABLED));
} else {
LOG.debug("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
TableDescriptor htd = null;
try {
htd = getDescriptor(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
// inability of retrieving one HTD shouldn't stop getting the remaining
LOG.warn("Trouble retrieving htd", fnfe);
}
if (htd == null) {
allvisited = false;
continue;
} else {
tds.put(htd.getHTableDescriptor().getTableName().getNameAsString(), htd);
}
fsvisited = allvisited;
}
if (htd == null) continue;
tds.put(htd.getHTableDescriptor().getTableName().getNameAsString(), htd);
}
return tds;
}
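
getAllDescriptors() now short-circuits to the cache, but only once a sweep of the table directories has succeeded for every table; a single unreadable descriptor leaves fsvisited false, so the next call scans the filesystem again. A self-contained analogue of that fast path; the generic String types and the backingStore parameter are illustrative, not HBase code.

import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;

class BulkCachedSource {
  private final Map<String, String> cache = new ConcurrentHashMap<String, String>();
  private volatile boolean usecache = true;
  private volatile boolean fsvisited = false;

  Map<String, String> getAll(Map<String, String> backingStore) {
    Map<String, String> out = new TreeMap<String, String>();
    if (fsvisited && usecache) {
      out.putAll(cache);               // fast path: no backing-store scan at all
      return out;
    }
    boolean allvisited = true;
    for (Map.Entry<String, String> e : backingStore.entrySet()) {
      String value = e.getValue();     // stand-in for reading one table descriptor
      if (value == null) {
        allvisited = false;            // one miss keeps the fast path disabled
        continue;
      }
      if (usecache) {
        cache.put(e.getKey(), value);
      }
      out.put(e.getKey(), value);
    }
    fsvisited = allvisited;            // only a complete, successful sweep enables the fast path
    return out;
  }
}

In the patch itself the cache is filled indirectly through getDescriptor()/get(), and the fsvisited assignment sits inside the loop; the sketch keeps only the invariant.
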
@ -343,13 +347,17 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
TableDescriptorAndModtime tdm = this.cache.remove(tablename);
return tdm == null ? null : tdm.getHTableDescriptor();
TableDescriptor descriptor = this.cache.remove(tablename);
if (descriptor == null) {
return null;
} else {
return descriptor.getHTableDescriptor();
}
}
/**
* Checks if a current table info file exists for the given table
*
*
* @param tableName name of table
* @return true if exists
* @throws IOException
@ -357,7 +365,7 @@ public class FSTableDescriptors implements TableDescriptors {
public boolean isTableInfoExists(TableName tableName) throws IOException {
return getTableInfoPath(tableName) != null;
}
/**
* Find the most current table info file for the given table in the hbase root directory.
* @return The file status of the current table info file or null if it does not exist
@ -371,15 +379,15 @@ public class FSTableDescriptors implements TableDescriptors {
throws IOException {
return getTableInfoPath(fs, tableDir, !fsreadonly);
}
/**
* Find the most current table info file for the table located in the given table directory.
*
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
*
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
@ -387,17 +395,17 @@ public class FSTableDescriptors implements TableDescriptors {
throws IOException {
return getTableInfoPath(fs, tableDir, false);
}
/**
* Find the most current table info file for the table in the given table directory.
*
*
* Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
* files and takes the 'current' one - meaning the one with the highest sequence number if
* present or no sequence number at all if none exist (for backward compatibility from before
* there were sequence numbers).
* If there are multiple table info files found and removeOldFiles is true it also deletes the
* older files.
*
*
* @return The file status of the current table info file or null if none exist
* @throws IOException
*/
@ -406,17 +414,17 @@ public class FSTableDescriptors implements TableDescriptors {
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
return getCurrentTableInfoStatus(fs, tableInfoDir, removeOldFiles);
}
/**
* Find the most current table info file in the given directory
*
*
* Looks within the given directory for any table info files
* and takes the 'current' one - meaning the one with the highest sequence number if present
* or no sequence number at all if none exist (for backward compatibility from before there
* were sequence numbers).
* If there are multiple possible files found
* and the we're not in read only mode it also deletes the older files.
*
*
* @return The file status of the current table info file or null if it does not exist
* @throws IOException
*/
@ -446,7 +454,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
return mostCurrent;
}
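
The selection above relies on tableinfo files carrying a fixed-width sequence-id suffix, so plain reverse name ordering puts the most recent file first. A small illustrative sketch; the .tableinfo prefix and 10-digit width are assumptions for the example, not literals quoted from this diff.

import java.util.Arrays;
import java.util.Comparator;
import java.util.Optional;

final class TableInfoSelectionSketch {
  static Optional<String> mostCurrent(String[] fileNames) {
    // Because the sequence id is zero-padded, natural string order matches numeric order,
    // and the maximum corresponds to the head of the reverse-ordered comparator in the class.
    return Arrays.stream(fileNames)
        .filter(n -> n.startsWith(".tableinfo"))      // assumed TABLEINFO_FILE_PREFIX value
        .max(Comparator.<String>naturalOrder());
  }

  public static void main(String[] args) {
    String[] names = {".tableinfo.0000000001", ".tableinfo.0000000003", ".tableinfo.0000000002"};
    System.out.println(mostCurrent(names).orElse("none"));  // prints .tableinfo.0000000003
  }
}
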
/**
* Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
* reverse order.
@ -471,7 +479,7 @@ public class FSTableDescriptors implements TableDescriptors {
public boolean accept(Path p) {
// Accept any file that starts with TABLEINFO_NAME
return p.getName().startsWith(TABLEINFO_FILE_PREFIX);
}};
}};
/**
* Width of the sequenceid that is a suffix on a tableinfo file.
@ -515,7 +523,6 @@ public class FSTableDescriptors implements TableDescriptors {
}
/**
* @param tabledir
* @param sequenceid
* @return Name of tableinfo file.
*/
@ -523,19 +530,6 @@ public class FSTableDescriptors implements TableDescriptors {
return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
}
/**
* @param fs
* @param rootdir
* @param tableName
* @return Modification time for the table {@link #TABLEINFO_FILE_PREFIX} file
* or <code>0</code> if no tableinfo file found.
* @throws IOException
*/
private long getTableInfoModtime(final TableName tableName) throws IOException {
FileStatus status = getTableInfoPath(tableName);
return status == null ? 0 : status.getModificationTime();
}
/**
* Returns the latest table descriptor for the given table directly from the file system
* if it exists, bypassing the local cache.
@ -547,48 +541,39 @@ public class FSTableDescriptors implements TableDescriptors {
return getTableDescriptorFromFs(fs, tableDir);
}
/**
* Returns the latest table descriptor for the given table directly from the file system
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName, boolean rewritePb) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir, rewritePb);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
return getTableDescriptorFromFs(fs, tableDir, false);
}
/**
* Returns the latest table descriptor for the table located at the given directory
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
boolean rewritePb)
throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
}
return readTableDescriptor(fs, status, false);
}
/**
* @param tableName table name
* @return TableDescriptorAndModtime or null if no table descriptor was found
* @throws IOException
*/
private TableDescriptorAndModtime getTableDescriptorAndModtime(TableName tableName)
throws IOException {
// ignore both -ROOT- and hbase:meta tables
if (tableName.equals(TableName.META_TABLE_NAME)) {
return null;
}
return getTableDescriptorAndModtime(getTableDir(tableName));
}
/**
* @param tableDir path to table directory
* @return TableDescriptorAndModtime or null if no table descriptor was found
* at the specified path
* @throws IOException
*/
private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir)
throws IOException {
FileStatus status = getTableInfoPath(tableDir);
if (status == null) {
return null;
}
TableDescriptor td = readTableDescriptor(fs, status, !fsreadonly);
return new TableDescriptorAndModtime(status.getModificationTime(), td);
return readTableDescriptor(fs, status, rewritePb);
}
private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
@ -646,8 +631,9 @@ public class FSTableDescriptors implements TableDescriptors {
Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
if (p == null) throw new IOException("Failed update");
LOG.info("Updated tableinfo=" + p);
long modtime = getTableInfoModtime(tableName);
this.cache.put(tableName, new TableDescriptorAndModtime(modtime, td));
if (usecache) {
this.cache.put(td.getHTableDescriptor().getTableName(), td);
}
return p;
}
@ -660,14 +646,14 @@ public class FSTableDescriptors implements TableDescriptors {
if (fsreadonly) {
throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
}
Path tableDir = getTableDir(tableName);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
}
/**
* Deletes files matching the table info file pattern within the given directory
* Deletes files matching the table info file pattern within the given directory
* whose sequenceId is at most the given max sequenceId.
*/
private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
@ -686,25 +672,25 @@ public class FSTableDescriptors implements TableDescriptors {
}
}
}
/**
* Attempts to write a new table descriptor to the given table's directory.
* It first writes it to the .tmp dir then uses an atomic rename to move it into place.
* It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
* not already in use.
* Removes the current descriptor file if passed in.
*
*
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs,
private static Path writeTableDescriptor(final FileSystem fs,
final TableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
// This directory is never removed to avoid removing it out from under a concurrent writer.
Path tmpTableDir = new Path(tableDir, TMP_DIR);
Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
// What is current sequenceid? We read the current sequenceid from
// the current file. After we read it, another thread could come in and
// compete with us writing out next version of file. The below retries
@ -713,7 +699,7 @@ public class FSTableDescriptors implements TableDescriptors {
int currentSequenceId = currentDescriptorFile == null ? 0 :
getTableInfoSequenceId(currentDescriptorFile.getPath());
int newSequenceId = currentSequenceId;
// Put arbitrary upperbound on how often we retry
int retries = 10;
int retrymax = currentSequenceId + retries;
@ -751,7 +737,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
return tableInfoDirPath;
}
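
The javadoc above describes the write path: serialize into the table's .tmp directory, then atomically rename into the tableinfo directory under the next free sequence id, retrying up to 10 times if another writer claims an id first. A self-contained sketch of that retry loop; FakeFs, the helper names, and the file-name format are illustrative stand-ins, not the HBase API.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

final class SequencedWriteSketch {
  /** Stand-in for the filesystem: rename fails if the target name is already taken. */
  static final class FakeFs {
    private final Set<String> files = new HashSet<String>();
    boolean rename(String from, String to) { return files.add(to); }
  }

  static String writeTableInfo(FakeFs fs, int currentSequenceId) throws IOException {
    int retries = 10;                                          // arbitrary upper bound, as in the patch
    for (int seq = currentSequenceId + 1; seq <= currentSequenceId + retries; seq++) {
      String target = String.format(".tableinfo.%010d", seq);  // assumed name format
      // The real code first writes the descriptor under .tmp so readers never see a
      // half-written file, then renames it into place.
      if (fs.rename(".tmp/" + target, target)) {
        return target;                                         // this sequence id was free
      }
      // Another writer won this sequence id; try the next one.
    }
    throw new IOException("Failed to write table descriptor after " + retries + " attempts");
  }
}
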
private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
@ -786,7 +772,7 @@ public class FSTableDescriptors implements TableDescriptors {
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)

TestCatalogJanitor.java

@ -348,6 +348,14 @@ public class TestCatalogJanitor {
public void add(TableDescriptor htd) throws IOException {
// noop
}
@Override
public void setCacheOn() throws IOException {
}
@Override
public void setCacheOff() throws IOException {
}
};
}

TestFSTableDescriptors.java

@ -28,6 +28,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Map;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -37,6 +38,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -250,6 +252,105 @@ public class TestFSTableDescriptors {
htds.cachehits >= (count * 2));
}
@Test
public void testHTableDescriptorsNoCache()
throws IOException, InterruptedException {
final String name = "testHTableDescriptorsNoCache";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any debris laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors htds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs, rootdir,
false, false);
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htds.createTableDescriptor(htd);
}
for (int i = 0; i < 2 * count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i % 2)));
}
// Update the table infos
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htd.addFamily(new HColumnDescriptor("" + i));
htds.updateTableDescriptor(new TableDescriptor(htd));
}
for (int i = 0; i < count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
assertTrue("Column Family " + i + " missing",
htds.get(TableName.valueOf(name + i)).hasFamily(Bytes.toBytes("" + i)));
}
assertEquals(count * 4, htds.invocations);
assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits);
}
@Test
public void testGetAll()
throws IOException, InterruptedException {
final String name = "testGetAll";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any debris laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors htds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs, rootdir);
final int count = 4;
// Write out table infos.
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htds.createTableDescriptor(htd);
}
// add hbase:meta
HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
htds.createTableDescriptor(htd);
assertEquals("getAll() didn't return all TableDescriptors, expected: " +
(count + 1) + " got: " + htds.getAll().size(),
count + 1, htds.getAll().size());
}
@Test
public void testCacheConsistency()
throws IOException, InterruptedException {
final String name = "testCacheConsistency";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any debris laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors chtds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs, rootdir);
FSTableDescriptors nonchtds = new FSTableDescriptorsTest(UTIL.getConfiguration(), fs,
rootdir, false, false);
final int count = 10;
// Write out table infos via non-cached FSTableDescriptors
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
nonchtds.createTableDescriptor(htd);
}
// Calls to getAll() won't increase the cache counter, do per table.
for (int i = 0; i < count; i++) {
assertTrue(chtds.get(TableName.valueOf(name + i)) != null);
}
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for hbase:meta
HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
nonchtds.createTableDescriptor(htd);
// hbase:meta will only increase the cachehit by 1
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
for (Map.Entry entry: nonchtds.getAll().entrySet()) {
String t = (String) entry.getKey();
HTableDescriptor nchtd = (HTableDescriptor) entry.getValue();
assertTrue("expected " + htd.toString() +
" got: " + chtds.get(TableName.valueOf(t)).toString(),
(nchtd.equals(chtds.get(TableName.valueOf(t)))));
}
}
@Test
public void testNoSuchTable() throws IOException {
final String name = "testNoSuchTable";
@ -334,5 +435,25 @@ public class TestFSTableDescriptors {
assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
}
private static class FSTableDescriptorsTest extends FSTableDescriptors {
public FSTableDescriptorsTest(Configuration conf, FileSystem fs, Path rootdir)
throws IOException {
this(conf, fs, rootdir, false, true);
}
public FSTableDescriptorsTest(Configuration conf, FileSystem fs, Path rootdir,
boolean fsreadonly, boolean usecache) throws IOException {
super(conf, fs, rootdir, fsreadonly, usecache);
}
@Override
public HTableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
" HTableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
}
}
}