HBASE-4714 Don't ship w/ icms enabled by default; REVERT -- I overcommitted by mistake

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1196117 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-11-01 16:45:31 +00:00
parent a47b0b3293
commit df053660bf
16 changed files with 317 additions and 461 deletions

View File

@ -731,7 +731,6 @@ Release 0.92.0 - Unreleased
HBASE-4699 Cleanup the UIs
HBASE-4552 Remove trivial 0.90 deprecated code from 0.92 and trunk.
(Jonathan Hsieh)
HBASE-4714 Don't ship w/ icms enabled by default
NEW FEATURES
HBASE-2001 Coprocessors: Colocate user code with regions (Mingjie Lai via

View File

@ -34,7 +34,7 @@
# Below are what we set by default. May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://wiki.apache.org/hadoop/PerformanceTuning
export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC"
export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
# Uncomment below to enable java garbage collection logging in the .out file.
# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
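As a quick sanity check that a JVM launched with these options actually runs CMS, here is a small standalone sketch using the standard JMX beans (class name is illustrative); with -XX:+UseConcMarkSweepGC the old-generation collector reports itself as "ConcurrentMarkSweep":

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class GcCheck {
  public static void main(String[] args) {
    // Print the collectors the running JVM enabled; CMS appears as
    // "ConcurrentMarkSweep", typically alongside the young-gen "ParNew".
    for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
      System.out.println(gc.getName());
    }
  }
}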

View File

@ -119,10 +119,7 @@
must be explicitly enabled in HBase 0.90.x (it is on by default in
0.92.x HBase). Set <code>hbase.hregion.memstore.mslab.enabled</code>
to true in your <classname>Configuration</classname>. See the cited
slides for background and detail<footnote><para>The latest JVMs do better
with regard to fragmentation, so make sure you are running a recent release.
Read down in the message,
<link xlink:href="http://osdir.com/ml/hotspot-gc-use/2011-11/msg00002.html">Identifying concurrent mode failures caused by fragmentation</link>.</para></footnote>.</para>
slides for background and detail.</para>
<para>For more information about GC logs, see <xref linkend="trouble.log.gc" />.
</para>
</section>
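As a hedged illustration of the property named above: the usual route is an entry in hbase-site.xml, but the same flag can be set programmatically on a Configuration (a minimal sketch; class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MslabExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Explicitly enable the MemStore-Local Allocation Buffer; required on
    // 0.90.x, already the default on 0.92.x.
    conf.setBoolean("hbase.hregion.memstore.mslab.enabled", true);
    System.out.println(conf.getBoolean("hbase.hregion.memstore.mslab.enabled", false));
  }
}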

View File

@ -182,6 +182,9 @@ public final class HConstants {
/** Used to construct the name of the compaction directory during compaction */
public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
/** The file name used to store HTD in HDFS */
public static final String TABLEINFO_NAME = ".tableinfo";
/** Default maximum file size */
public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
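For orientation, a minimal sketch of how the restored HConstants.TABLEINFO_NAME composes into a descriptor path, mirroring FSUtils.getTableInfoPath() later in this diff (root and table names are hypothetical):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;

public class TableInfoPathExample {
  public static void main(String[] args) {
    Path rootdir = new Path("hdfs://namenode/hbase"); // hypothetical rootdir
    Path tabledir = new Path(rootdir, "mytable");     // hypothetical table dir
    // Same composition as FSUtils.getTableInfoPath(rootdir, tablename):
    // prints hdfs://namenode/hbase/mytable/.tableinfo
    System.out.println(new Path(tabledir, HConstants.TABLEINFO_NAME));
  }
}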

View File

@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
/**
@ -336,9 +335,9 @@ public class MasterFileSystem {
private void createRootTableInfo(Path rd) throws IOException {
// Create ROOT tableInfo if required.
if (!FSTableDescriptors.isTableInfoExists(fs, rd,
if (!FSUtils.tableInfoExists(fs, rd,
Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
FSUtils.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
}
}
@ -421,7 +420,7 @@ public class MasterFileSystem {
*/
public void createTableDescriptor(HTableDescriptor htableDescriptor)
throws IOException {
FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
FSUtils.createTableDescriptor(htableDescriptor, conf);
}
/**

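A minimal sketch of the call pattern this revert restores, with descriptor helpers reached through FSUtils as in MasterFileSystem.createTableDescriptor() above (class name and table name are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSUtils;

public class DescriptorWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTableDescriptor htd = new HTableDescriptor("example_table");
    // Post-revert entry point; returns false when a non-empty .tableinfo
    // already exists and creation was not forced.
    boolean created = FSUtils.createTableDescriptor(htd, conf);
    System.out.println("descriptor written: " + created);
  }
}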
View File

@ -29,21 +29,21 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.zookeeper.KeeperException;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.ServerName;
/**
* Handler to create a table.
@ -137,7 +137,7 @@ public class CreateTableHandler extends EventHandler {
// tableDir is created. Should we change below method to be createTable
// where we create table in tmp dir with its table descriptor file and then
// do rename to move it into place?
FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
FSUtils.createTableDescriptor(this.hTableDescriptor, this.conf);
List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
final int batchSize =

View File

@ -19,29 +19,20 @@ package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
@ -51,6 +42,7 @@ import org.apache.hadoop.hbase.TableExistsException;
* the filesystem or can be read and write.
*/
public class FSTableDescriptors implements TableDescriptors {
private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
private final FileSystem fs;
private final Path rootdir;
@ -58,9 +50,6 @@ public class FSTableDescriptors implements TableDescriptors {
long cachehits = 0;
long invocations = 0;
/** The file name used to store HTD in HDFS */
public static final String TABLEINFO_NAME = ".tableinfo";
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
@ -141,7 +130,7 @@ public class FSTableDescriptors implements TableDescriptors {
// Check mod time has not changed (this is trip to NN).
long modtime =
FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, tablename);
FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
if (tdm != null) {
if (modtime <= tdm.getModtime()) {
cachehits++;
@ -149,7 +138,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
}
HTableDescriptor htd =
FSTableDescriptors.getTableDescriptor(this.fs, this.rootdir, tablename);
FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
if (htd == null) {
// More likely is above will throw a FileNotFoundException
throw new TableExistsException("No descriptor for " + tablename);
@ -192,9 +181,9 @@ public class FSTableDescriptors implements TableDescriptors {
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
throw new NotImplementedException();
}
if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
long modtime =
FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
}
@ -212,317 +201,4 @@ public class FSTableDescriptors implements TableDescriptors {
TableDescriptorModtime tdm = this.cache.remove(tablename);
return tdm == null? null: tdm.getTableDescriptor();
}
/**
* Checks if <code>.tableinfo</code> exists for the given table
*
* @param fs file system
* @param rootdir root directory of HBase installation
* @param tableName name of table
* @return true if exists
* @throws IOException
*/
public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
String tableName) throws IOException {
FileStatus status =
FSTableDescriptors.getTableInfoPath(fs, rootdir, tableName);
return status == null? false: fs.exists(status.getPath());
}
private static FileStatus getTableInfoPath(final FileSystem fs,
final Path rootdir, final String tableName)
throws IOException {
Path tabledir = FSUtils.getTablePath(rootdir, tableName);
return getTableInfoPath(fs, tabledir);
}
private static FileStatus getTableInfoPath(final FileSystem fs,
final Path tabledir)
throws IOException {
FileStatus [] status = fs.listStatus(tabledir, new PathFilter() {
@Override
public boolean accept(Path p) {
// Accept any file that starts with TABLEINFO_NAME
return p.getName().startsWith(TABLEINFO_NAME);
}
});
if (status == null || status.length < 1) return null;
Arrays.sort(status, new TableInfoFileStatusComparator());
if (status.length > 1) {
// Clean away old versions of .tableinfo
for (int i = 1; i < status.length; i++) {
// Clean up old versions
if (!fs.delete(status[i].getPath(), false)) {
LOG.warn("Failed cleanup of " + status[i].getPath());
}
}
}
return status[0];
}
/**
* Compare {@link FileStatus} instances by {@link Path#getName()}.
* Returns in reverse order.
*/
static class TableInfoFileStatusComparator
implements Comparator<FileStatus> {
@Override
public int compare(FileStatus left, FileStatus right) {
return -left.compareTo(right);
}
}
/**
* Width of the sequenceid that is suffix on tableinfo.
*/
static final int WIDTH_OF_SEQUENCE_ID = 10;
/**
* Regex to eat up sequenceid suffix on a .tableinfo file.
*/
private static final Pattern SUFFIX =
Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
/*
* @param number
* @return Returns zero-prefixed decimal version of passed number, padded
* to WIDTH_OF_SEQUENCE_ID digits (does absolute in case number is negative).
*/
static String formatTableInfoSequenceId(final int number) {
byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
int d = Math.abs(number);
for (int i = b.length - 1; i >= 0; i--) {
b[i] = (byte)((d % 10) + '0');
d /= 10;
}
return Bytes.toString(b);
}
/**
* @param p Path to a <code>.tableinfo</code> file.
* @return The current editid or 0 if none found.
*/
static int getTableInfoSequenceid(final Path p) {
if (p == null) return 0;
Matcher m = SUFFIX.matcher(p.getName());
if (!m.matches()) return 0;
// A bare .tableinfo has no sequenceid suffix; group(2) is null then.
String suffix = m.group(2);
return suffix == null? 0: Integer.parseInt(suffix);
}
/**
* @param tabledir
* @param sequenceid
* @return Name of tableinfo file.
*/
static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
return new Path(tabledir,
TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
}
/**
* @param fs
* @param rootdir
* @param tableName
* @return Modification time for the table {@link #TABLEINFO_NAME} file
* or <code>0</code> if no tableinfo file found.
* @throws IOException
*/
static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
final String tableName)
throws IOException {
FileStatus status = getTableInfoPath(fs, rootdir, tableName);
return status == null? 0: status.getModificationTime();
}
/**
* Get HTD from HDFS.
* @param fs
* @param hbaseRootDir
* @param tableName
* @return Descriptor or null if none found.
* @throws IOException
*/
public static HTableDescriptor getTableDescriptor(FileSystem fs,
Path hbaseRootDir, byte[] tableName)
throws IOException {
return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
}
static HTableDescriptor getTableDescriptor(FileSystem fs,
Path hbaseRootDir, String tableName) {
HTableDescriptor htd = null;
try {
htd = getTableDescriptor(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDescriptor. Current table name = " +
tableName, e);
} catch (IOException ioe) {
LOG.debug("Exception during readTableDescriptor. Current table name = " +
tableName, ioe);
}
return htd;
}
public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
throws IOException, NullPointerException {
if (tableDir == null) throw new NullPointerException();
FileStatus status = getTableInfoPath(fs, tableDir);
if (status == null) return null;
FSDataInputStream fsDataInputStream = fs.open(status.getPath());
HTableDescriptor hTableDescriptor = null;
try {
hTableDescriptor = new HTableDescriptor();
hTableDescriptor.readFields(fsDataInputStream);
} finally {
fsDataInputStream.close();
}
return hTableDescriptor;
}
/**
* Update table descriptor
* @param fs
* @param rootdir
* @param hTableDescriptor
* @return New tableinfo
* @throws IOException
*/
static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor hTableDescriptor)
throws IOException {
Path tabledir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
Path p = writeTableDescriptor(fs, hTableDescriptor, tabledir, true);
LOG.info("Updated tableinfo=" + p);
return p;
}
private static void writeHTD(final FileSystem fs, final Path p,
final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, true);
try {
htd.write(out);
out.write('\n');
out.write('\n');
out.write(Bytes.toBytes(htd.toString()));
} finally {
out.close();
}
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param fs
* @param htableDescriptor
* @param rootdir
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor) throws IOException {
return createTableDescriptor(fs, rootdir, htableDescriptor, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param fs
* @param htableDescriptor
* @param rootdir
* @param forceCreation
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor, boolean forceCreation)
throws IOException {
FileStatus status = getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
if (status != null) {
LOG.info("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
LOG.info("TableInfo already exists; skipping creation");
return false;
}
}
}
writeTableDescriptor(fs, htableDescriptor,
FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()),
forceCreation);
return true;
}
/**
* Deletes a table's <code>.tableinfo</code> file from the file system if it
* exists. Used in unit tests.
*/
public static void deleteTableDescriptorIfExists(String tableName,
Configuration conf) throws IOException {
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
FileStatus status =
FSTableDescriptors.getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
// The below deleteDirectory works for either file or directory.
if (status != null && fs.exists(status.getPath())) FSUtils.deleteDirectory(fs, status.getPath());
}
/**
* Called when we are creating a table to write out the table's descriptor.
* @param fs
* @param hTableDescriptor
* @param tableDir
* @param forceCreation True if we are to force creation
* @throws IOException
*/
private static Path writeTableDescriptor(FileSystem fs,
HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir);
int sequenceid = getTableInfoSequenceid(status == null? null: status.getPath());
Path tableInfoPath = null;
do {
sequenceid += 1;
tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
} while (fs.exists(tableInfoPath));
try {
writeHTD(fs, tableInfoPath, hTableDescriptor);
if (status != null) {
if (!fs.delete(status.getPath(), false)) {
LOG.warn("Failed delete of " + status.getPath());
}
}
} catch (IOException e) {
LOG.error("Unable to write the table descriptor at " + tableInfoPath
+ ".", e);
fs.delete(tableInfoPath, true);
throw e;
}
return tableInfoPath;
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param htableDescriptor
* @param conf
*/
public static boolean createTableDescriptor(
HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
return createTableDescriptor(htableDescriptor, conf, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param htableDescriptor
* @param conf
* @param forceCreation
*/
public static boolean createTableDescriptor(
HTableDescriptor htableDescriptor, Configuration conf,
boolean forceCreation) throws IOException {
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
forceCreation);
}
}
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.util;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@ -62,7 +63,7 @@ public abstract class FSUtils {
protected FSUtils() {
super();
}
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
String scheme = fs.getUri().getScheme();
if (scheme == null) {
@ -155,8 +156,7 @@ public abstract class FSUtils {
* @return true if dfs is in safemode, false otherwise.
*
*/
private static boolean isInSafeMode(FileSystem fs)
throws IOException {
private static boolean isInSafeMode(FileSystem fs) throws IOException {
// Refactored safe-mode check for HBASE-4510
if (fs instanceof DistributedFileSystem) {
Path rootPath = new Path("/");
@ -180,9 +180,8 @@ public abstract class FSUtils {
*/
public static void checkDfsSafeMode(final Configuration conf)
throws IOException {
Path rootDir = getRootDir(conf);
FileSystem fs = FileSystem.get(conf);
if (isInSafeMode(fs, rootDir)) {
if (isInSafeMode(fs)) {
throw new IOException("File system is in safemode, it can't be written now");
}
}
@ -452,10 +451,9 @@ public abstract class FSUtils {
public static void waitOnSafeMode(final Configuration conf,
final long wait)
throws IOException {
Path rootDir = getRootDir(conf);
FileSystem fs = FileSystem.get(conf);
// Make sure dfs is not in safe mode
while (isInSafeMode(fs, rootDir)) {
while (isInSafeMode(fs)) {
LOG.info("Waiting for dfs to exit safe mode...");
try {
Thread.sleep(wait);
@ -506,6 +504,21 @@ public abstract class FSUtils {
return fs.exists(rootRegionDir);
}
/**
* Checks if .tableinfo exists for the given table
*
* @param fs file system
* @param rootdir root directory of HBase installation
* @param tableName name of table
* @return true if exists
* @throws IOException
*/
public static boolean tableInfoExists(FileSystem fs, Path rootdir,
String tableName) throws IOException {
Path tablePath = getTableInfoPath(rootdir, tableName);
return fs.exists(tablePath);
}
/**
* Compute HDFS blocks distribution of a given file, or a portion of the file
* @param fs file system
@ -851,6 +864,35 @@ public abstract class FSUtils {
return tabledirs;
}
/**
* Get table info path for a table.
* @param rootdir
* @param tablename
* @return Table info path
*/
private static Path getTableInfoPath(Path rootdir, String tablename) {
Path tablePath = getTablePath(rootdir, tablename);
return new Path(tablePath, HConstants.TABLEINFO_NAME);
}
/**
* @param fs
* @param rootdir
* @param tablename
* @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
* @throws IOException
*/
public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
final String tablename)
throws IOException {
Path p = getTableInfoPath(rootdir, tablename);
FileStatus [] status = fs.listStatus(p);
if (status == null || status.length < 1) {
throw new FileNotFoundException("No status for " + p.toString());
}
return status[0].getModificationTime();
}
public static Path getTablePath(Path rootdir, byte [] tableName) {
return getTablePath(rootdir, Bytes.toString(tableName));
}
@ -859,15 +901,235 @@ public abstract class FSUtils {
return new Path(rootdir, tableName);
}
/**
* @param conf
* @return Returns the filesystem of the hbase rootdir.
* @throws IOException
*/
public static FileSystem getCurrentFileSystem(Configuration conf)
private static FileSystem getCurrentFileSystem(Configuration conf)
throws IOException {
return getRootDir(conf).getFileSystem(conf);
}
/**
* Get HTableDescriptor
* @param config
* @param tableName
* @return HTableDescriptor for table
* @throws IOException
*/
public static HTableDescriptor getHTableDescriptor(Configuration config,
String tableName)
throws IOException {
Path path = getRootDir(config);
FileSystem fs = path.getFileSystem(config);
return getTableDescriptor(fs, path, tableName);
}
/**
* Get HTD from HDFS.
* @param fs
* @param hbaseRootDir
* @param tableName
* @return Descriptor or null if none found.
* @throws IOException
*/
public static HTableDescriptor getTableDescriptor(FileSystem fs,
Path hbaseRootDir, byte[] tableName)
throws IOException {
return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
}
public static HTableDescriptor getTableDescriptor(FileSystem fs,
Path hbaseRootDir, String tableName) {
HTableDescriptor htd = null;
try {
htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
} catch (NullPointerException e) {
LOG.debug("Exception during readTableDescriptor. Current table name = " +
tableName, e);
} catch (IOException ioe) {
LOG.debug("Exception during readTableDescriptor. Current table name = " +
tableName, ioe);
}
return htd;
}
public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
throws IOException, NullPointerException {
if (tableDir == null) throw new NullPointerException();
Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
FSDataInputStream fsDataInputStream = fs.open(tableinfo);
HTableDescriptor hTableDescriptor = null;
try {
hTableDescriptor = new HTableDescriptor();
hTableDescriptor.readFields(fsDataInputStream);
} finally {
fsDataInputStream.close();
}
return hTableDescriptor;
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param htableDescriptor
* @param conf
*/
public static boolean createTableDescriptor(
HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
return createTableDescriptor(htableDescriptor, conf, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param htableDescriptor
* @param conf
* @param forceCreation
*/
public static boolean createTableDescriptor(
HTableDescriptor htableDescriptor, Configuration conf,
boolean forceCreation) throws IOException {
FileSystem fs = getCurrentFileSystem(conf);
return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
forceCreation);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
*
* @param fs
* @param htableDescriptor
* @param rootdir
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor) throws IOException {
return createTableDescriptor(fs, rootdir, htableDescriptor, false);
}
/**
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @param fs
* @param htableDescriptor
* @param rootdir
* @param forceCreation
*/
public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor htableDescriptor, boolean forceCreation)
throws IOException {
Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
.getNameAsString());
LOG.info("Current tableInfoPath = " + tableInfoPath);
if (!forceCreation) {
if (fs.exists(tableInfoPath)
&& fs.getFileStatus(tableInfoPath).getLen() > 0) {
LOG.info("TableInfo already exists; skipping creation");
return false;
}
}
writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
htableDescriptor.getNameAsString()), forceCreation);
return true;
}
/**
* Deletes a table's <code>.tableinfo</code> file from the file system if it
* exists. Used in unit tests.
*/
public static void deleteTableDescriptorIfExists(String tableName,
Configuration conf) throws IOException {
FileSystem fs = getCurrentFileSystem(conf);
Path tableInfoPath = getTableInfoPath(getRootDir(conf), tableName);
if (fs.exists(tableInfoPath))
deleteDirectory(fs, tableInfoPath);
}
/**
* Called when we are creating a table to write out the table's descriptor.
* @param fs
* @param hTableDescriptor
* @param tableDir
* @param forceCreation True if we are to force creation
* @throws IOException
*/
private static void writeTableDescriptor(FileSystem fs,
HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
throws IOException {
// Create in tmpdir and then move into place in case we crash after
// create but before close. If we don't successfully close the file,
// subsequent region reopens will fail the below because create is
// registered in NN.
Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
Path tmpPath = new Path(new Path(tableDir, ".tmp"),
HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
try {
writeHTD(fs, tmpPath, hTableDescriptor);
} catch (IOException e) {
LOG.error("Unable to write the table descriptor at " + tmpPath
+ ".", e);
fs.delete(tmpPath, true);
throw e;
}
// TODO: The below is less than ideal and likely error prone. There is a
// better rename in Hadoop versions after 0.20 that takes rename options
// (this has its own issues according to mighty Todd in that old readers
// may fail as we cross the rename transition) but until then, we have this
// forceCreation flag which does a delete and then a rename, so there is a
// hole. Need to fix.
try {
if (forceCreation) {
if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
String errMsg = "Unable to delete " + tableInfoPath
+ " while forcefully writing the table descriptor.";
LOG.error(errMsg);
throw new IOException(errMsg);
}
}
if (!fs.rename(tmpPath, tableInfoPath)) {
String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
LOG.error(errMsg);
throw new IOException(errMsg);
} else {
LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
}
} finally {
fs.delete(tmpPath, true);
}
}
/**
* Update table descriptor
* @param fs
* @param rootdir
* @param hTableDescriptor
* @throws IOException
*/
public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
HTableDescriptor hTableDescriptor)
throws IOException {
Path tableInfoPath =
getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
hTableDescriptor.toString());
}
private static void writeHTD(final FileSystem fs, final Path p,
final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, true);
try {
htd.write(out);
out.write('\n');
out.write('\n');
out.write(Bytes.toBytes(htd.toString()));
} finally {
out.close();
}
}
/**
* Runs through the HBase rootdir and creates a reverse lookup map for

View File

@ -152,7 +152,7 @@ class HMerge {
fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
Bytes.toString(tableName)
);
this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
HConstants.HREGION_LOGDIR_NAME);
Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);

View File

@ -237,7 +237,7 @@ public class Merge extends Configured implements Tool {
if (info2 == null) {
throw new NullPointerException("info2 is null using key " + meta2);
}
HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()),
this.rootdir, this.tableName);
HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);

View File

@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@ -357,7 +357,7 @@ public class TestMasterFailover {
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@ -369,7 +369,7 @@ public class TestMasterFailover {
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
@ -681,7 +681,7 @@ public class TestMasterFailover {
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
null, null);
HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@ -693,7 +693,7 @@ public class TestMasterFailover {
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);

View File

@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.Test;
@ -69,11 +68,11 @@ public class TestHRegionInfo {
// Delete the temporary table directory that might still be there from the
// previous test run.
FSTableDescriptors.deleteTableDescriptorIfExists(tablename,
FSUtils.deleteTableDescriptorIfExists(tablename,
HTU.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(tablename);
FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration());
FSUtils.createTableDescriptor(htd, HTU.getConfiguration());
HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
HTableDescriptor htd2 = hri.getTableDesc();

View File

@ -21,11 +21,9 @@ import static org.junit.Assert.*;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -44,58 +42,6 @@ public class TestFSTableDescriptors {
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final Log LOG = LogFactory.getLog(TestFSTableDescriptors.class);
@Test
public void testSequenceidAdvancesOnTableInfo() throws IOException {
Path testdir = UTIL.getDataTestDir();
HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p0));
int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
assertTrue(i1 == i0 + 1);
Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
// Assert we cleaned up the old file.
assertTrue(!fs.exists(p1));
int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
assertTrue(i2 == i1 + 1);
}
@Test
public void testFormatTableInfoSequenceId() {
Path p0 = assertWriteAndReadSequenceid(0);
// Assert p0 has format we expect.
StringBuilder sb = new StringBuilder();
for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
sb.append("0");
}
assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(),
p0.getName());
// Check a few more.
Path p2 = assertWriteAndReadSequenceid(2);
Path p10000 = assertWriteAndReadSequenceid(10000);
// Get a .tableinfo that has no sequenceid suffix.
Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
FSTableDescriptors.TableInfoFileStatusComparator comparator =
new FSTableDescriptors.TableInfoFileStatusComparator();
assertTrue(comparator.compare(fs, fs0) > 0);
assertTrue(comparator.compare(fs0, fs2) > 0);
assertTrue(comparator.compare(fs2, fs10000) > 0);
}
private Path assertWriteAndReadSequenceid(final int i) {
Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i);
int ii = FSTableDescriptors.getTableInfoSequenceid(p);
assertEquals(i, ii);
return p;
}
@Test
public void testRemoves() throws IOException {
final String name = "testRemoves";
@ -116,14 +62,14 @@ public class TestFSTableDescriptors {
Path rootdir = UTIL.getDataTestDir(name);
createHTDInFS(fs, rootdir, htd);
HTableDescriptor htd2 =
FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
assertTrue(htd.equals(htd2));
}
private void createHTDInFS(final FileSystem fs, Path rootdir,
final HTableDescriptor htd)
throws IOException {
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
FSUtils.createTableDescriptor(fs, rootdir, htd);
}
@Test public void testHTableDescriptors()
@ -156,7 +102,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htd.addFamily(new HColumnDescriptor("" + i));
FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
FSUtils.updateHTableDescriptor(fs, rootdir, htd);
}
// Wait a while so mod time we write is for sure different.
Thread.sleep(100);
@ -175,7 +121,7 @@ public class TestFSTableDescriptors {
htds.cachehits >= ((count * 2) + 1));
}
@Test (expected=org.apache.hadoop.hbase.TableExistsException.class)
@Test (expected=java.io.FileNotFoundException.class)
public void testNoSuchTable() throws IOException {
final String name = "testNoSuchTable";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@ -197,30 +143,4 @@ public class TestFSTableDescriptors {
htds.add(htd);
htds.add(htd);
}
@Test
public void testTableInfoFileStatusComparator() {
FileStatus bare =
new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
FileStatus future =
new FileStatus(0, false, 0, 0, -1,
new Path("/tmp/tablinfo." + System.currentTimeMillis()));
FileStatus farFuture =
new FileStatus(0, false, 0, 0, -1,
new Path("/tmp/tablinfo." + System.currentTimeMillis() + 1000));
FileStatus [] alist = {bare, future, farFuture};
FileStatus [] blist = {bare, farFuture, future};
FileStatus [] clist = {farFuture, bare, future};
FSTableDescriptors.TableInfoFileStatusComparator c =
new FSTableDescriptors.TableInfoFileStatusComparator();
Arrays.sort(alist, c);
Arrays.sort(blist, c);
Arrays.sort(clist, c);
// Now assert all sorted same in way we want.
for (int i = 0; i < alist.length; i++) {
assertTrue(alist[i].equals(blist[i]));
assertTrue(blist[i].equals(clist[i]));
assertTrue(clist[i].equals(i == 0? farFuture: i == 1? future: bare));
}
}
}
}

View File

@ -35,6 +35,7 @@ import org.junit.Test;
* Test {@link FSUtils}.
*/
public class TestFSUtils {
@Test public void testIsHDFS() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
htu.getConfiguration().setBoolean("dfs.support.append", false);

View File

@ -96,7 +96,7 @@ public class TestMergeTable {
// Create regions and populate them at same time. Create the tabledir
// for them first.
FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
FSUtils.createTableDescriptor(fs, rootdir, desc);
HRegion [] regions = {
createRegion(desc, null, row_70001, 1, 70000, rootdir),
createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),

View File

@ -136,7 +136,7 @@ public class TestMergeTool extends HBaseTestCase {
try {
// Create root and meta regions
createRootAndMetaRegions();
FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc);
FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc);
/*
* Create the regions we will merge
*/