diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index 85985814a2b..e82082cff3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -31,7 +31,7 @@ public interface TableDescriptors {
/**
* @return TableDescriptor for tablename
*/
- TableDescriptor get(final TableName tableName) throws IOException;
+ TableDescriptor get(TableName tableName) throws IOException;
/**
* Get Map of all TableDescriptors for a given namespace.
@@ -40,32 +40,33 @@ public interface TableDescriptors {
Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
/**
- * Get Map of all TableDescriptors. Populates the descriptor cache as a
- * side effect.
+ * Get Map of all TableDescriptors. Populates the descriptor cache as a side effect.
+ *
* Note: the key of the map is the table name, including its namespace, as generated by
* {@link TableName#getNameWithNamespaceInclAsString()}.
* @return Map of all descriptors.
*/
Map<String, TableDescriptor> getAll() throws IOException;
+ /**
+ * Add or update descriptor. Simply calls {@link #update(TableDescriptor, boolean)} with
+ * {@code cacheOnly} set to {@code false}.
+ */
+ default void update(TableDescriptor htd) throws IOException {
+ update(htd, false);
+ }
+
/**
* Add or update descriptor
* @param htd Descriptor to set into TableDescriptors
+ * @param cacheOnly if {@code true}, only add the given {@code htd} to the cache, without
+ *          updating the storage. For example, when creating a table we already write the
+ *          descriptor to the fs while creating the fs layout, so there is no need to write it
+ *          to the fs again.
*/
- void update(final TableDescriptor htd) throws IOException;
+ void update(TableDescriptor htd, boolean cacheOnly) throws IOException;
/**
* @return Instance of table descriptor or null if none found.
*/
- TableDescriptor remove(final TableName tablename) throws IOException;
-
- /**
- * Enables the tabledescriptor cache
- */
- void setCacheOn() throws IOException;
-
- /**
- * Disables the tabledescriptor cache
- */
- void setCacheOff() throws IOException;
+ TableDescriptor remove(TableName tablename) throws IOException;
}
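To make the reworked contract concrete, a minimal caller-side sketch (not part of the patch; the TableDescriptors handle, table name and column family are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    class UpdateContractSketch {
      // tds would come from the surrounding service, e.g. getTableDescriptors().
      static void demo(TableDescriptors tds) throws IOException {
        TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
        // Writes to storage and refreshes the cache: the new default method
        // delegates to update(htd, false).
        tds.update(htd);
        // Cache-only refresh: for when the descriptor has already been written
        // to the fs, e.g. while creating the fs layout of a new table.
        tds.update(htd, true);
      }
    }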
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a70dc482ca4..f1453f325b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -222,6 +222,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
@@ -233,6 +234,7 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
+
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@@ -759,6 +761,11 @@ public class HMaster extends HRegionServer implements MasterServices {
return true;
}
+ @Override
+ protected boolean cacheTableDescriptor() {
+ return true;
+ }
+
@Override
protected RSRpcServices createRpcServices() throws IOException {
return new MasterRpcServices(this);
@@ -924,9 +931,6 @@ public class HMaster extends HRegionServer implements MasterServices {
this.fileSystemManager = new MasterFileSystem(conf);
this.walManager = new MasterWalManager(this);
- // enable table descriptors cache
- this.tableDescriptors.setCacheOn();
-
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
status.setStatus("Pre-loading table descriptors");
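The new hook replaces the removed setCacheOn() call: rather than toggling the cache after construction, the base class can consult cacheTableDescriptor() when it builds its FSTableDescriptors. A sketch of the assumed wiring (the four-argument constructor and the canUpdateTableDescriptor() hook are assumptions about surrounding code, not shown in this patch):

    // Assumed HRegionServer initialization: the overridable hooks fix
    // read-only vs. writable and caching vs. non-caching once, up front.
    this.tableDescriptors =
      new FSTableDescriptors(fs, rootDir, !canUpdateTableDescriptor(), cacheTableDescriptor());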
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 1c2bddc37e4..3b483e6aeb2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -141,6 +141,7 @@ public class CloneSnapshotProcedure
break;
case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
+ env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
break;
case CLONE_SNAPSHOT_ADD_TO_META:
@@ -172,8 +173,9 @@ public class CloneSnapshotProcedure
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_UPDATE_DESC_CACHE);
break;
case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
+ // XXX: this state should be renamed to 'set table enabled', as we now cache the
+ // descriptor right after writing the fs layout.
CreateTableProcedure.setEnabledState(env, getTableName());
- CreateTableProcedure.updateTableDescCache(env, getTableName());
setNextState(CloneSnapshotState.CLONE_SNAPHOST_RESTORE_ACL);
break;
case CLONE_SNAPHOST_RESTORE_ACL:
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index b48cef8cec0..113f96c431d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -101,6 +101,7 @@ public class CreateTableProcedure
case CREATE_TABLE_WRITE_FS_LAYOUT:
DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, true);
newRegions = createFsLayout(env, tableDescriptor, newRegions);
+ env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
break;
case CREATE_TABLE_ADD_TO_META:
@@ -114,8 +115,9 @@ public class CreateTableProcedure
setNextState(CreateTableState.CREATE_TABLE_UPDATE_DESC_CACHE);
break;
case CREATE_TABLE_UPDATE_DESC_CACHE:
+ // XXX: this state should be renamed to 'set table enabled', as we now cache the
+ // descriptor right after writing the fs layout.
setEnabledState(env, getTableName());
- updateTableDescCache(env, getTableName());
setNextState(CreateTableState.CREATE_TABLE_POST_OPERATION);
break;
case CREATE_TABLE_POST_OPERATION:
@@ -406,11 +408,6 @@ public class CreateTableProcedure
regionInfos, tableDescriptor.getRegionReplication());
}
- protected static void updateTableDescCache(final MasterProcedureEnv env,
- final TableName tableName) throws IOException {
- env.getMasterServices().getTableDescriptors().get(tableName);
- }
-
@Override
protected boolean shouldWaitClientAck(MasterProcedureEnv env) {
// system tables are created on bootstrap internally by the system
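Both procedures now share a write-then-cache pattern that the removed updateTableDescCache() round trip used to approximate by re-reading the descriptor from the fs. A sketch of the effect inside the procedure (names as in the patch; the final check is illustrative):

    // After CREATE_TABLE_WRITE_FS_LAYOUT (or CLONE_SNAPSHOT_WRITE_FS_LAYOUT)
    // the descriptor is already on the fs, so only the cache is refreshed:
    TableDescriptors tds = env.getMasterServices().getTableDescriptors();
    tds.update(tableDescriptor, true); // cacheOnly = true, no second fs write
    // Later states can read it straight from the cache, with no fs round trip:
    assert tds.get(getTableName()) != null;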
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index cdf78cc3017..f158452296c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -72,7 +72,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMetaState>
- FSTableDescriptors.tryUpdateMetaTableDescriptor(conf, fs, rootDir,
- builder -> builder.setRegionReplication(
+ TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs,
+ rootDir, builder -> builder.setRegionReplication(
conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM)));
- TableDescriptor metaDescriptor = new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
HRegion
.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
.close();
+ return metaDescriptor;
}
@Override
@@ -101,7 +101,8 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMetaState>
- writeFsLayout(rootDir, conf);
+ TableDescriptor td = writeFsLayout(rootDir, conf);
+ env.getMasterServices().getTableDescriptors().update(td, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -124,10 +124,11 @@ public class FSTableDescriptors implements TableDescriptors {
- public static void tryUpdateMetaTableDescriptor(Configuration conf, FileSystem fs, Path rootdir,
- Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
+ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf,
+ FileSystem fs, Path rootdir,
+ Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
// see if we already have meta descriptor on fs. Write one if not.
try {
- getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
+ return getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
} catch (TableInfoMissingException e) {
TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
if (metaObserver != null) {
@@ -144,6 +144,7 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("Failed update hbase:meta table descriptor");
}
LOG.info("Updated hbase:meta table descriptor to {}", p);
+ return td;
}
}
@@ -196,57 +197,45 @@ public class FSTableDescriptors implements TableDescriptors {
.setPriority(Coprocessor.PRIORITY_SYSTEM).build());
}
- @Override
- public void setCacheOn() throws IOException {
- this.cache.clear();
- this.usecache = true;
- }
-
- @Override
- public void setCacheOff() throws IOException {
- this.usecache = false;
- this.cache.clear();
- }
-
@VisibleForTesting
- public boolean isUsecache() {
+ protected boolean isUsecache() {
return this.usecache;
}
/**
* Get the current table descriptor for the given table, or null if none exists.
- *
- * Uses a local cache of the descriptor but still checks the filesystem on each call
- * to see if a newer file has been created since the cached one was read.
+ *
+ * Uses a local cache of the descriptor but still checks the filesystem on each call if
+ * {@link #fsvisited} is not {@code true}, i.e., we haven't done a full scan yet, to see if a newer
+ * file has been created since the cached one was read.
*/
@Override
@Nullable
- public TableDescriptor get(final TableName tablename)
- throws IOException {
+ public TableDescriptor get(TableName tableName) {
invocations++;
if (usecache) {
// Look in cache of descriptors.
- TableDescriptor cachedtdm = this.cache.get(tablename);
+ TableDescriptor cachedtdm = this.cache.get(tableName);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
+ // we do not need to go to fs any more
+ if (fsvisited) {
+ return null;
+ }
}
TableDescriptor tdmt = null;
try {
- tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
- } catch (NullPointerException e) {
- LOG.debug("Exception during readTableDecriptor. Current table name = "
- + tablename, e);
+ tdmt = getTableDescriptorFromFs(fs, rootdir, tableName);
} catch (TableInfoMissingException e) {
// ignore. This is regular operation
- } catch (IOException ioe) {
- LOG.debug("Exception during readTableDecriptor. Current table name = "
- + tablename, ioe);
+ } catch (NullPointerException | IOException ioe) {
+ LOG.debug("Exception during readTableDecriptor. Current table name = " + tableName, ioe);
}
// last HTD written wins
if (usecache && tdmt != null) {
- this.cache.put(tablename, tdmt);
+ this.cache.put(tableName, tdmt);
}
return tdmt;
@@ -258,29 +247,22 @@ public class FSTableDescriptors implements TableDescriptors {
@Override
public Map<String, TableDescriptor> getAll() throws IOException {
Map<String, TableDescriptor> tds = new TreeMap<>();
- if (fsvisited && usecache) {
+ if (fsvisited) {
for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
}
} else {
LOG.trace("Fetching table descriptors from the filesystem.");
- boolean allvisited = true;
+ boolean allvisited = usecache;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
- TableDescriptor htd = null;
- try {
- htd = get(CommonFSUtils.getTableName(d));
- } catch (FileNotFoundException fnfe) {
- // inability of retrieving one HTD shouldn't stop getting the remaining
- LOG.warn("Trouble retrieving htd", fnfe);
- }
+ TableDescriptor htd = get(CommonFSUtils.getTableName(d));
if (htd == null) {
allvisited = false;
- continue;
} else {
tds.put(htd.getTableName().getNameWithNamespaceInclAsString(), htd);
}
- fsvisited = allvisited;
}
+ fsvisited = allvisited;
}
return tds;
}
@@ -290,35 +272,48 @@ public class FSTableDescriptors implements TableDescriptors {
* @see #get(org.apache.hadoop.hbase.TableName)
*/
@Override
- public Map<String, TableDescriptor> getByNamespace(String name)
- throws IOException {
+ public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
Map<String, TableDescriptor> htds = new TreeMap<>();
List<Path> tableDirs =
- FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name));
- for (Path d: tableDirs) {
- TableDescriptor htd = null;
- try {
- htd = get(CommonFSUtils.getTableName(d));
- } catch (FileNotFoundException fnfe) {
- // inability of retrieving one HTD shouldn't stop getting the remaining
- LOG.warn("Trouble retrieving htd", fnfe);
+ FSUtils.getLocalTableDirs(fs, CommonFSUtils.getNamespaceDir(rootdir, name));
+ for (Path d : tableDirs) {
+ TableDescriptor htd = get(CommonFSUtils.getTableName(d));
+ if (htd == null) {
+ continue;
}
- if (htd == null) continue;
htds.put(CommonFSUtils.getTableName(d).getNameAsString(), htd);
}
return htds;
}
- /**
- * Adds (or updates) the table descriptor to the FileSystem
- * and updates the local cache with it.
- */
@Override
- public void update(TableDescriptor htd) throws IOException {
+ public void update(TableDescriptor td, boolean cacheOnly) throws IOException {
+ // TODO: in fact this method will only be called at master side, so fsreadonly will always be
+ // false and usecache will always be true. In general, we'd better have a
+ // ReadOnlyFSTableDescriptors for HRegionServer, but for now HMaster extends HRegionServer, so
+ // unless we make use of generics, we can not have different implementations for HMaster and
+ // HRegionServer. Revisit this when we make HMaster not extend HRegionServer in the future.
if (fsreadonly) {
- throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
+ throw new UnsupportedOperationException("Cannot add a table descriptor - in read only mode");
}
- updateTableDescriptor(htd);
+ if (!cacheOnly) {
+ updateTableDescriptor(td);
+ }
+ if (usecache) {
+ this.cache.put(td.getTableName(), td);
+ }
+ }
+
+ @VisibleForTesting
+ Path updateTableDescriptor(TableDescriptor td) throws IOException {
+ TableName tableName = td.getTableName();
+ Path tableDir = getTableDir(tableName);
+ Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
+ if (p == null) {
+ throw new IOException("Failed update");
+ }
+ LOG.info("Updated tableinfo=" + p);
+ return p;
}
/**
@@ -380,22 +375,21 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Find the most current table info file in the given directory
- *
- * Looks within the given directory for any table info files
- * and takes the 'current' one - meaning the one with the highest sequence number if present
- * or no sequence number at all if none exist (for backward compatibility from before there
- * were sequence numbers).
- * If there are multiple possible files found
- * and the we're not in read only mode it also deletes the older files.
- *
+ *
+ * Looks within the given directory for any table info files and takes the 'current' one - meaning
+ * the one with the highest sequence number if present or no sequence number at all if none exist
+ * (for backward compatibility from before there were sequence numbers).
+ *
+ * If there are multiple possible files found and we're not in read only mode it also deletes
+ * the older files.
* @return The file status of the current table info file or null if it does not exist
- * @throws IOException
*/
- // only visible for FSTableDescriptorMigrationToSubdir, can be removed with that
- static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, boolean removeOldFiles)
- throws IOException {
+ private static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir,
+ boolean removeOldFiles) throws IOException {
FileStatus[] status = CommonFSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
- if (status == null || status.length < 1) return null;
+ if (status == null || status.length < 1) {
+ return null;
+ }
FileStatus mostCurrent = null;
for (FileStatus file : status) {
if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
@@ -419,16 +413,16 @@ public class FSTableDescriptors implements TableDescriptors {
}
/**
- * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
- * reverse order.
+ * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in reverse order.
*/
@VisibleForTesting
static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR =
- new Comparator<FileStatus>() {
- @Override
- public int compare(FileStatus left, FileStatus right) {
- return right.compareTo(left);
- }};
+ new Comparator<FileStatus>() {
+ @Override
+ public int compare(FileStatus left, FileStatus right) {
+ return right.compareTo(left);
+ }
+ };
/**
* Return the table directory in HDFS
@@ -448,12 +442,13 @@ public class FSTableDescriptors implements TableDescriptors {
/**
* Width of the sequenceid that is a suffix on a tableinfo file.
*/
- @VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10;
+ @VisibleForTesting
+ static final int WIDTH_OF_SEQUENCE_ID = 10;
- /*
+ /**
* @param number Number to use as suffix.
- * @return Returns zero-prefixed decimal version of passed
- * number (Does absolute in case number is negative).
+ * @return Returns zero-prefixed decimal version of passed number (takes the absolute value in
+ *         case the number is negative).
*/
private static String formatTableInfoSequenceId(final int number) {
byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
@@ -477,12 +472,19 @@ public class FSTableDescriptors implements TableDescriptors {
* @param p Path to a <code>.tableinfo</code> file.
* @return The current editid or 0 if none found.
*/
- @VisibleForTesting static int getTableInfoSequenceId(final Path p) {
- if (p == null) return 0;
+ @VisibleForTesting
+ static int getTableInfoSequenceId(final Path p) {
+ if (p == null) {
+ return 0;
+ }
Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
- if (!m.matches()) throw new IllegalArgumentException(p.toString());
+ if (!m.matches()) {
+ throw new IllegalArgumentException(p.toString());
+ }
String suffix = m.group(2);
- if (suffix == null || suffix.length() <= 0) return 0;
+ if (suffix == null || suffix.length() <= 0) {
+ return 0;
+ }
return Integer.parseInt(m.group(2));
}
@@ -490,7 +492,8 @@ public class FSTableDescriptors implements TableDescriptors {
* @param sequenceid
* @return Name of tableinfo file.
*/
- @VisibleForTesting static String getTableInfoFileName(final int sequenceid) {
+ @VisibleForTesting
+ static String getTableInfoFileName(final int sequenceid) {
return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
}
@@ -511,7 +514,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws TableInfoMissingException if there is no descriptor
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
- throws IOException {
+ throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
@@ -538,29 +541,6 @@ public class FSTableDescriptors implements TableDescriptors {
return htd;
}
- /**
- * Update table descriptor on the file system
- * @throws IOException Thrown if failed update.
- * @throws NotImplementedException if in read only mode
- */
- @VisibleForTesting
- Path updateTableDescriptor(TableDescriptor td) throws IOException {
- if (fsreadonly) {
- throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
- }
- TableName tableName = td.getTableName();
- Path tableDir = getTableDir(tableName);
- Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
- if (p == null) {
- throw new IOException("Failed update");
- }
- LOG.info("Updated tableinfo=" + p);
- if (usecache) {
- this.cache.put(td.getTableName(), td);
- }
- return p;
- }
-
/**
* Deletes files matching the table info file pattern within the given directory
* whose sequenceId is at most the given max sequenceId.
@@ -583,18 +563,15 @@ public class FSTableDescriptors implements TableDescriptors {
}
/**
- * Attempts to write a new table descriptor to the given table's directory.
- * It first writes it to the .tmp dir then uses an atomic rename to move it into place.
- * It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
- * not already in use.
+ * Attempts to write a new table descriptor to the given table's directory. It first writes it to
+ * the .tmp dir then uses an atomic rename to move it into place. It begins at the
+ * currentSequenceId + 1 and tries 10 times to find a new sequence number not already in use.
+ *
* Removes the current descriptor file if passed in.
- *
* @return Descriptor file or null if we failed write.
*/
- private static Path writeTableDescriptor(final FileSystem fs,
- final TableDescriptor htd, final Path tableDir,
- final FileStatus currentDescriptorFile)
- throws IOException {
+ private static Path writeTableDescriptor(final FileSystem fs, final TableDescriptor htd,
+ final Path tableDir, final FileStatus currentDescriptorFile) throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
// This directory is never removed to avoid removing it out from under a concurrent writer.
Path tmpTableDir = new Path(tableDir, TMP_DIR);
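For context on the sequence-id handling above: tableinfo files are named so that lexicographic and numeric order agree, which is what lets TABLEINFO_FILESTATUS_COMPARATOR pick the current file by name alone. An illustrative re-statement of the scheme (not the class's code):

    // TABLEINFO_FILE_PREFIX plus a zero-padded id, WIDTH_OF_SEQUENCE_ID (10) wide;
    // formatTableInfoSequenceId takes the absolute value of a negative input.
    static String tableInfoFileName(int sequenceId) {
      return ".tableinfo." + String.format("%010d", Math.abs(sequenceId));
    }
    // tableInfoFileName(3)  -> ".tableinfo.0000000003"
    // tableInfoFileName(42) -> ".tableinfo.0000000042"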
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 8c949092526..5e78c3dc775 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -331,17 +331,9 @@ public class MockMasterServices extends MockNoopMasterServices {
}
@Override
- public void update(TableDescriptor htd) throws IOException {
+ public void update(TableDescriptor htd, boolean cacheOnly) throws IOException {
// noop
}
-
- @Override
- public void setCacheOn() throws IOException {
- }
-
- @Override
- public void setCacheOff() throws IOException {
- }
};
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
index 103fb733cf2..7d6c55b5c90 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
@@ -71,7 +71,7 @@ public class TestReadAndWriteRegionInfoFile {
RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
// Create a region. That'll write the .regioninfo file.
FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR);
- FSTableDescriptors.tryUpdateMetaTableDescriptor(CONF, FS, ROOT_DIR, null);
+ FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR, null);
HRegion r = HBaseTestingUtility.createRegionAndWAL(ri, ROOT_DIR, CONF,
fsTableDescriptors.get(TableName.META_TABLE_NAME));
// Get modtime on the file.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 554cb9d0517..996be3e8a26 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -93,7 +91,7 @@ public class TestFSTableDescriptors {
FileStatus[] statuses = fs.listStatus(testdir);
assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
for (int i = 0; i < 10; i++) {
- fstd.updateTableDescriptor(htd);
+ fstd.update(htd);
}
statuses = fs.listStatus(testdir);
assertTrue(statuses.length == 1);
@@ -218,8 +216,7 @@ public class TestFSTableDescriptors {
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
@Override
- public TableDescriptor get(TableName tablename)
- throws TableExistsException, FileNotFoundException, IOException {
+ public TableDescriptor get(TableName tablename) {
LOG.info(tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
}
@@ -240,7 +237,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
- htds.updateTableDescriptor(builder.build());
+ htds.update(builder.build());
}
// Wait a while so mod time we write is for sure different.
Thread.sleep(100);
@@ -276,7 +273,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
- htds.updateTableDescriptor(builder.build());
+ htds.update(builder.build());
}
for (int i = 0; i < count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
@@ -371,13 +368,18 @@ public class TestFSTableDescriptors {
// random will only increase the cachehit by 1
assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1);
- for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
+ for (Map.Entry<String, TableDescriptor> entry : chtds.getAll().entrySet()) {
String t = (String) entry.getKey();
TableDescriptor nchtd = entry.getValue();
- assertTrue("expected " + htd.toString() +
- " got: " + chtds.get(TableName.valueOf(t)).toString(),
- (nchtd.equals(chtds.get(TableName.valueOf(t)))));
+ assertTrue(
+ "expected " + htd.toString() + " got: " + chtds.get(TableName.valueOf(t)).toString(),
+ (nchtd.equals(chtds.get(TableName.valueOf(t)))));
}
+ // this is by design: for an FSTableDescriptors with cache enabled, once we have done a full
+ // scan and loaded all the table descriptors into the cache, we will not go to the file system
+ // again, as the only way to update a table descriptor is through us, so we can cache it when
+ // updating.
+ assertNotNull(nonchtds.get(random));
+ assertNull(chtds.get(random));
}
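The negative-caching behavior the test pins down can be summarized in one sequence; a sketch assuming a cache-enabled instance htds and a descriptor written to the fs behind its back:

    // Once a full scan completes (fsvisited == true), a cache-enabled
    // FSTableDescriptors answers misses from the cache alone:
    htds.getAll(); // full scan, marks fsvisited
    // A descriptor written directly to the fs afterwards stays invisible here,
    // because every update is expected to flow through update():
    assertNull(htds.get(TableName.valueOf("written_behind_our_back")));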
@Test
@@ -474,8 +476,7 @@ public class TestFSTableDescriptors {
}
@Override
- public TableDescriptor get(TableName tablename)
- throws TableExistsException, FileNotFoundException, IOException {
+ public TableDescriptor get(TableName tablename) {
LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
" TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);