HBASE-4553 The update of .tableinfo is not atomic; we remove then rename

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1197812 13f79535-47bb-0310-9956-ffa450edef68

commit bb0c9a11d8
parent c824695af9
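In brief: before this patch, FSUtils updated a table's descriptor by deleting the live .tableinfo file and then renaming a freshly written temporary copy into its place, so a crash or a concurrent reader between those two steps found a table with no descriptor at all. A minimal editorial sketch of that window, condensed from the FSUtils.writeTableDescriptor code removed further down (variable names shortened):

    // Illustration only -- the pre-patch remove-then-rename update.
    Path tableInfoPath = new Path(tableDir, ".tableinfo");
    Path tmpPath = new Path(new Path(tableDir, ".tmp"),
        ".tableinfo." + System.currentTimeMillis());
    writeHTD(fs, tmpPath, htd);           // 1. write the new descriptor aside
    fs.delete(tableInfoPath, false);      // 2. drop the old one -- window opens
    fs.rename(tmpPath, tableInfoPath);    // 3. window closes only here

The fix below moves descriptor handling into FSTableDescriptors and writes each schema edit as a new, sequenceid-suffixed .tableinfo.&lt;seqid&gt; file that is renamed into the table directory before the previous one is deleted, so readers always find a complete descriptor.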
@@ -448,7 +448,7 @@ Release 0.92.0 - Unreleased
               use new Addressing class instead (Jonathan Gray)
    HBASE-4719  HBase script assumes pre-Hadoop 0.21 layout of jar files
               (Roman Shposhnik)
+   HBASE-4553  The update of .tableinfo is not atomic; we remove then rename
 
   TESTS
    HBASE-4450  test for number of blocks read: to serve as baseline for expected
@@ -182,9 +182,6 @@ public final class HConstants {
   /** Used to construct the name of the compaction directory during compaction */
   public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
 
-  /** The file name used to store HTD in HDFS */
-  public static final String TABLEINFO_NAME = ".tableinfo";
-
   /** Default maximum file size */
   public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
 
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -360,9 +361,9 @@ public class MasterFileSystem {
 
   private void createRootTableInfo(Path rd) throws IOException {
     // Create ROOT tableInfo if required.
-    if (!FSUtils.tableInfoExists(fs, rd,
+    if (!FSTableDescriptors.isTableInfoExists(fs, rd,
         Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
-      FSUtils.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
+      FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
     }
   }
 
@@ -451,7 +452,7 @@ public class MasterFileSystem {
    */
   public void createTableDescriptor(HTableDescriptor htableDescriptor)
       throws IOException {
-    FSUtils.createTableDescriptor(htableDescriptor, conf);
+    FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
   }
 
   /**
@@ -29,21 +29,21 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.zookeeper.KeeperException;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.zookeeper.KeeperException;
 
 /**
  * Handler to create a table.
@@ -137,7 +137,7 @@ public class CreateTableHandler extends EventHandler {
     // tableDir is created.  Should we change below method to be createTable
     // where we create table in tmp dir with its table descriptor file and then
     // do rename to move it into place?
-    FSUtils.createTableDescriptor(this.hTableDescriptor, this.conf);
+    FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
 
     List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
     final int batchSize =
@@ -19,20 +19,29 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 
 
 /**
@@ -40,9 +49,19 @@ import org.apache.commons.logging.LogFactory;
  * passed filesystem.  It expects descriptors to be in a file under the
  * table's directory in FS.  Can be read-only -- i.e. does not modify
  * the filesystem or can be read and write.
+ *
+ * <p>Also has utility for keeping up the table descriptors tableinfo file.
+ * The table schema file is kept under the table directory in the filesystem.
+ * It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
+ * edit sequenceid: e.g. <code>.tableinfo.0000000003</code>.  This sequenceid
+ * is always increasing.  It starts at zero.  The table schema file with the
+ * highest sequenceid has the most recent schema edit. Usually there is one file
+ * only, the most recent but there may be short periods where there are more
+ * than one file. Old files are eventually cleaned.  Presumption is that there
+ * will not be lots of concurrent clients making table schema edits.  If so,
+ * the below needs a bit of a reworking and perhaps some supporting api in hdfs.
  */
 public class FSTableDescriptors implements TableDescriptors {
 
   private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
   private final FileSystem fs;
   private final Path rootdir;
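As a worked example of the naming scheme this javadoc describes (an editorial sketch, not part of the commit; the table path is hypothetical, and the helpers used are package-private, so this assumes code living in org.apache.hadoop.hbase.util):

    Path tabledir = new Path("/hbase/exampleTable");           // hypothetical path
    Path p = FSTableDescriptors.getTableInfoFileName(tabledir, 3);
    // p.getName() -> ".tableinfo.0000000003" (sequenceid zero-padded to 10 digits)
    int seqid = FSTableDescriptors.getTableInfoSequenceid(p);  // -> 3
    // An old-style plain ".tableinfo" name parses as sequenceid 0.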
@@ -50,6 +69,9 @@ public class FSTableDescriptors implements TableDescriptors {
   long cachehits = 0;
   long invocations = 0;
 
+  /** The file name used to store HTD in HDFS */
+  public static final String TABLEINFO_NAME = ".tableinfo";
+
   // This cache does not age out the old stuff.  Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
@@ -129,16 +151,14 @@ public class FSTableDescriptors implements TableDescriptors {
     TableDescriptorModtime tdm = this.cache.get(tablename);
 
     // Check mod time has not changed (this is trip to NN).
-    long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
+    long modtime = getTableInfoModtime(this.fs, this.rootdir, tablename);
     if (tdm != null) {
       if (modtime <= tdm.getModtime()) {
         cachehits++;
         return tdm.getTableDescriptor();
       }
     }
-    HTableDescriptor htd =
-      FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
+    HTableDescriptor htd = getTableDescriptor(this.fs, this.rootdir, tablename);
     if (htd == null) {
       // More likely is above will throw a FileNotFoundException
       throw new TableExistsException("No descriptor for " + tablename);
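One detail worth making explicit in the rewritten get() above: the cache is validated by file modification time, so a lookup costs one namenode round trip (the stat behind getTableInfoModtime) when the schema is unchanged, and a full descriptor read only when it has moved. In outline (editorial condensation of the code above):

    long modtime = getTableInfoModtime(this.fs, this.rootdir, tablename); // 1 NN trip
    TableDescriptorModtime tdm = this.cache.get(tablename);
    if (tdm != null && modtime <= tdm.getModtime()) {
      return tdm.getTableDescriptor();   // schema unchanged since last read
    }
    // otherwise re-read the descriptor from HDFS and refresh the cache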
@@ -181,9 +201,8 @@ public class FSTableDescriptors implements TableDescriptors {
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
       throw new NotImplementedException();
     }
-    if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
-    long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
+    if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
+    long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
     this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
   }
 
@@ -201,4 +220,362 @@ public class FSTableDescriptors implements TableDescriptors {
     TableDescriptorModtime tdm = this.cache.remove(tablename);
     return tdm == null? null: tdm.getTableDescriptor();
   }
-}
+
+  /**
+   * Checks if <code>.tableinfo</code> exists for given table
+   *
+   * @param fs file system
+   * @param rootdir root directory of HBase installation
+   * @param tableName name of table
+   * @return true if exists
+   * @throws IOException
+   */
+  public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
+      String tableName) throws IOException {
+    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
+    return status == null? false: fs.exists(status.getPath());
+  }
+
+  private static FileStatus getTableInfoPath(final FileSystem fs,
+      final Path rootdir, final String tableName)
+  throws IOException {
+    Path tabledir = FSUtils.getTablePath(rootdir, tableName);
+    return getTableInfoPath(fs, tabledir);
+  }
+
+  /**
+   * Looks under the table directory in the filesystem for files with a
+   * {@link #TABLEINFO_NAME} prefix.  Returns reference to the 'latest' instance.
+   * @param fs
+   * @param tabledir
+   * @return The 'current' tableinfo file.
+   * @throws IOException
+   */
+  private static FileStatus getTableInfoPath(final FileSystem fs,
+      final Path tabledir)
+  throws IOException {
+    FileStatus [] status = fs.listStatus(tabledir, new PathFilter() {
+      @Override
+      public boolean accept(Path p) {
+        // Accept any file that starts with TABLEINFO_NAME
+        return p.getName().startsWith(TABLEINFO_NAME);
+      }
+    });
+    if (status == null || status.length < 1) return null;
+    Arrays.sort(status, new FileStatusFileNameComparator());
+    if (status.length > 1) {
+      // Clean away old versions of .tableinfo
+      for (int i = 1; i < status.length; i++) {
+        Path p = status[i].getPath();
+        // Clean up old versions
+        if (!fs.delete(p, false)) {
+          LOG.warn("Failed cleanup of " + status);
+        } else {
+          LOG.debug("Cleaned up old tableinfo file " + p);
+        }
+      }
+    }
+    return status[0];
+  }
+
+  /**
+   * Compare {@link FileStatus} instances by {@link Path#getName()}.
+   * Returns in reverse order.
+   */
+  static class FileStatusFileNameComparator
+  implements Comparator<FileStatus> {
+    @Override
+    public int compare(FileStatus left, FileStatus right) {
+      return -left.compareTo(right);
+    }
+  }
+
+  /**
+   * Width of the sequenceid that is a suffix on a tableinfo file.
+   */
+  static final int WIDTH_OF_SEQUENCE_ID = 10;
+
+  /*
+   * @param number Number to use as suffix.
+   * @return Returns zero-prefixed 5-byte wide decimal version of passed
+   * number (Does absolute in case number is negative).
+   */
+  static String formatTableInfoSequenceId(final int number) {
+    byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
+    int d = Math.abs(number);
+    for (int i = b.length - 1; i >= 0; i--) {
+      b[i] = (byte)((d % 10) + '0');
+      d /= 10;
+    }
+    return Bytes.toString(b);
+  }
+
+  /**
+   * Regex to eat up sequenceid suffix on a .tableinfo file.
+   * Use regex because may encounter oldstyle .tableinfos where there is no
+   * sequenceid on the end.
+   */
+  private static final Pattern SUFFIX =
+    Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
+
+  /**
+   * @param p Path to a <code>.tableinfo</code> file.
+   * @return The current editid or 0 if none found.
+   */
+  static int getTableInfoSequenceid(final Path p) {
+    if (p == null) return 0;
+    Matcher m = SUFFIX.matcher(p.getName());
+    if (!m.matches()) throw new IllegalArgumentException(p.toString());
+    String suffix = m.group(2);
+    if (suffix == null || suffix.length() <= 0) return 0;
+    return Integer.parseInt(m.group(2));
+  }
+
+  /**
+   * @param tabledir
+   * @param sequenceid
+   * @return Name of tableinfo file.
+   */
+  static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
+    return new Path(tabledir,
+      TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
+  }
+
+  /**
+   * @param fs
+   * @param rootdir
+   * @param tableName
+   * @return Modification time for the table {@link #TABLEINFO_NAME} file
+   * or <code>0</code> if no tableinfo file found.
+   * @throws IOException
+   */
+  static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
+      final String tableName)
+  throws IOException {
+    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
+    return status == null? 0: status.getModificationTime();
+  }
+
+  /**
+   * Get HTD from HDFS.
+   * @param fs
+   * @param hbaseRootDir
+   * @param tableName
+   * @return Descriptor or null if none found.
+   * @throws IOException
+   */
+  public static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, byte[] tableName)
+  throws IOException {
+    return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
+  }
+
+  static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, String tableName) {
+    HTableDescriptor htd = null;
+    try {
+      htd = getTableDescriptor(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
+    } catch (NullPointerException e) {
+      LOG.debug("Exception during readTableDecriptor. Current table name = " +
+        tableName , e);
+    } catch (IOException ioe) {
+      LOG.debug("Exception during readTableDecriptor. Current table name = " +
+        tableName , ioe);
+    }
+    return htd;
+  }
+
+  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
+  throws IOException, NullPointerException {
+    if (tableDir == null) throw new NullPointerException();
+    FileStatus status = getTableInfoPath(fs, tableDir);
+    if (status == null) return null;
+    FSDataInputStream fsDataInputStream = fs.open(status.getPath());
+    HTableDescriptor hTableDescriptor = null;
+    try {
+      hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+    } finally {
+      fsDataInputStream.close();
+    }
+    return hTableDescriptor;
+  }
+
+  /**
+   * Update table descriptor
+   * @param fs
+   * @param conf
+   * @param hTableDescriptor
+   * @return New tableinfo or null if we failed update.
+   * @throws IOException Thrown if failed update.
+   */
+  static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor hTableDescriptor)
+  throws IOException {
+    Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
+    Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir,
+      getTableInfoPath(fs, tableDir));
+    if (p == null) throw new IOException("Failed update");
+    LOG.info("Updated tableinfo=" + p);
+    return p;
+  }
+
+  /**
+   * Deletes a table's directory from the file system if exists. Used in unit
+   * tests.
+   */
+  public static void deleteTableDescriptorIfExists(String tableName,
+      Configuration conf) throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
+    // The below deleteDirectory works for either file or directory.
+    if (fs.exists(status.getPath())) FSUtils.deleteDirectory(fs, status.getPath());
+  }
+
+  /**
+   * @param fs
+   * @param hTableDescriptor
+   * @param tableDir
+   * @param status
+   * @return Descriptor file or null if we failed write.
+   * @throws IOException
+   */
+  private static Path writeTableDescriptor(final FileSystem fs,
+      final HTableDescriptor hTableDescriptor, final Path tableDir,
+      final FileStatus status)
+  throws IOException {
+    // Get temporary dir into which we'll first write a file to avoid
+    // half-written file phenomenon.
+    Path tmpTableDir = new Path(tableDir, ".tmp");
+    // What is current sequenceid?  We read the current sequenceid from
+    // the current file.  After we read it, another thread could come in and
+    // compete with us writing out next version of file.  The below retries
+    // should help in this case some but its hard to do guarantees in face of
+    // concurrent schema edits.
+    int currentSequenceid =
+      status == null? 0: getTableInfoSequenceid(status.getPath());
+    int sequenceid = currentSequenceid;
+    // Put arbitrary upperbound on how often we retry
+    int retries = 10;
+    int retrymax = currentSequenceid + retries;
+    Path tableInfoPath = null;
+    do {
+      sequenceid += 1;
+      Path p = getTableInfoFileName(tmpTableDir, sequenceid);
+      if (fs.exists(p)) {
+        LOG.debug(p + " exists; retrying up to " + retries + " times");
+        continue;
+      }
+      try {
+        writeHTD(fs, p, hTableDescriptor);
+        tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
+        if (!fs.rename(p, tableInfoPath)) {
+          throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
+        }
+      } catch (IOException ioe) {
+        // Presume clash of names or something; go around again.
+        LOG.debug("Failed write and/or rename; retrying", ioe);
+        if (!FSUtils.deleteDirectory(fs, p)) {
+          LOG.warn("Failed cleanup of " + p);
+        }
+        tableInfoPath = null;
+        continue;
+      }
+      // Cleanup old schema file.
+      if (status != null) {
+        if (!FSUtils.deleteDirectory(fs, status.getPath())) {
+          LOG.warn("Failed delete of " + status.getPath() + "; continuing");
+        }
+      }
+      break;
+    } while (sequenceid < retrymax);
+    return tableInfoPath;
+  }
+
+  private static void writeHTD(final FileSystem fs, final Path p,
+      final HTableDescriptor htd)
+  throws IOException {
+    FSDataOutputStream out = fs.create(p, false);
+    try {
+      htd.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(htd.toString()));
+    } finally {
+      out.close();
+    }
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+   *
+   * @param htableDescriptor
+   * @param conf
+   */
+  public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
+      Configuration conf)
+  throws IOException {
+    return createTableDescriptor(htableDescriptor, conf, false);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+   * forceCreation is true then even if previous table descriptor is present it
+   * will be overwritten
+   *
+   * @param htableDescriptor
+   * @param conf
+   * @param forceCreation True if we are to overwrite existing file.
+   */
+  static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
+      final Configuration conf, boolean forceCreation)
+  throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
+      forceCreation);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+   * Used by tests.
+   * @param fs
+   * @param htableDescriptor
+   * @param rootdir
+   */
+  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor htableDescriptor)
+  throws IOException {
+    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+   * forceCreation is true then even if previous table descriptor is present it
+   * will be overwritten
+   *
+   * @param fs
+   * @param htableDescriptor
+   * @param rootdir
+   * @param forceCreation
+   * @return True if we successfully created file.
+   */
+  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor htableDescriptor, boolean forceCreation)
+  throws IOException {
+    FileStatus status =
+      getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
+    if (status != null) {
+      LOG.info("Current tableInfoPath = " + status.getPath());
+      if (!forceCreation) {
+        if (fs.exists(status.getPath()) && status.getLen() > 0) {
+          LOG.info("TableInfo already exists.. Skipping creation");
+          return false;
+        }
+      }
+    }
+    Path p = writeTableDescriptor(fs, htableDescriptor,
+      FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()), status);
+    return p != null;
+  }
+}
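The core of the fix is writeTableDescriptor() in the hunk above: each update writes a brand-new file with the next sequenceid under the table's .tmp directory, renames it into the table directory, and only then deletes the previous tableinfo file. A condensed, editorial sketch of that loop (retry bookkeeping and logging trimmed; the method above is the authoritative version):

    int sequenceid = currentSequenceid;          // parsed from the current file
    Path tableInfoPath = null;
    do {
      sequenceid += 1;
      Path p = getTableInfoFileName(tmpTableDir, sequenceid);
      if (fs.exists(p)) continue;                // another writer won; bump again
      writeHTD(fs, p, htd);                      // fs.create(p, false): no overwrite
      tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
      if (!fs.rename(p, tableInfoPath)) throw new IOException("Failed rename");
      if (status != null) FSUtils.deleteDirectory(fs, status.getPath()); // old file
      break;
    } while (sequenceid < retrymax);

Note the ordering: the new .tableinfo.N+1 is fully in place before .tableinfo.N is removed, which is exactly the atomicity the old remove-then-rename scheme lacked.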
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.DataInputStream;
 import java.io.EOFException;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -43,7 +42,6 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -61,7 +59,7 @@ public abstract class FSUtils {
   protected FSUtils() {
     super();
   }
 
   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
     String scheme = fs.getUri().getScheme();
     if (scheme == null) {
@@ -489,21 +487,6 @@ public abstract class FSUtils {
     return fs.exists(rootRegionDir);
   }
 
-  /**
-   * Checks if .tableinfo exists for given table
-   *
-   * @param fs file system
-   * @param rootdir root directory of HBase installation
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public static boolean tableInfoExists(FileSystem fs, Path rootdir,
-      String tableName) throws IOException {
-    Path tablePath = getTableInfoPath(rootdir, tableName);
-    return fs.exists(tablePath);
-  }
-
   /**
    * Compute HDFS blocks distribution of a given file, or a portion of the file
    * @param fs file system
@@ -849,35 +832,6 @@ public abstract class FSUtils {
     return tabledirs;
   }
 
-  /**
-   * Get table info path for a table.
-   * @param rootdir
-   * @param tableName
-   * @return Table info path
-   */
-  private static Path getTableInfoPath(Path rootdir, String tablename) {
-    Path tablePath = getTablePath(rootdir, tablename);
-    return new Path(tablePath, HConstants.TABLEINFO_NAME);
-  }
-
-  /**
-   * @param fs
-   * @param rootdir
-   * @param tablename
-   * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
-   * @throws IOException
-   */
-  public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
-      final String tablename)
-  throws IOException {
-    Path p = getTableInfoPath(rootdir, tablename);
-    FileStatus [] status = fs.listStatus(p);
-    if (status == null || status.length < 1) {
-      throw new FileNotFoundException("No status for " + p.toString());
-    }
-    return status[0].getModificationTime();
-  }
-
   public static Path getTablePath(Path rootdir, byte [] tableName) {
     return getTablePath(rootdir, Bytes.toString(tableName));
   }
@@ -886,235 +840,15 @@ public abstract class FSUtils {
     return new Path(rootdir, tableName);
   }
 
-  private static FileSystem getCurrentFileSystem(Configuration conf)
+  /**
+   * @param conf
+   * @return Returns the filesystem of the hbase rootdir.
+   * @throws IOException
+   */
+  public static FileSystem getCurrentFileSystem(Configuration conf)
   throws IOException {
     return getRootDir(conf).getFileSystem(conf);
   }
 
-  /**
-   * Get HTableDescriptor
-   * @param config
-   * @param tableName
-   * @return HTableDescriptor for table
-   * @throws IOException
-   */
-  public static HTableDescriptor getHTableDescriptor(Configuration config,
-      String tableName)
-  throws IOException {
-    Path path = getRootDir(config);
-    FileSystem fs = path.getFileSystem(config);
-    return getTableDescriptor(fs, path, tableName);
-  }
-
-  /**
-   * Get HTD from HDFS.
-   * @param fs
-   * @param hbaseRootDir
-   * @param tableName
-   * @return Descriptor or null if none found.
-   * @throws IOException
-   */
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, byte[] tableName)
-  throws IOException {
-    return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, String tableName) {
-    HTableDescriptor htd = null;
-    try {
-      htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , e);
-    } catch (IOException ioe) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , ioe);
-    }
-    return htd;
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
-  throws IOException, NullPointerException {
-    if (tableDir == null) throw new NullPointerException();
-    Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    FSDataInputStream fsDataInputStream = fs.open(tableinfo);
-    HTableDescriptor hTableDescriptor = null;
-    try {
-      hTableDescriptor = new HTableDescriptor();
-      hTableDescriptor.readFields(fsDataInputStream);
-    } finally {
-      fsDataInputStream.close();
-    }
-    return hTableDescriptor;
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param htableDescriptor
-   * @param conf
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
-    return createTableDescriptor(htableDescriptor, conf, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param htableDescriptor
-   * @param conf
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf,
-      boolean forceCreation) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
-      forceCreation);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor) throws IOException {
-    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor, boolean forceCreation)
-  throws IOException {
-    Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
-        .getNameAsString());
-    LOG.info("Current tableInfoPath = " + tableInfoPath);
-    if (!forceCreation) {
-      if (fs.exists(tableInfoPath)
-          && fs.getFileStatus(tableInfoPath).getLen() > 0) {
-        LOG.info("TableInfo already exists.. Skipping creation");
-        return false;
-      }
-    }
-    writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
-        htableDescriptor.getNameAsString()), forceCreation);
-
-    return true;
-  }
-
-  /**
-   * Deletes a table's directory from the file system if exists. Used in unit
-   * tests.
-   */
-  public static void deleteTableDescriptorIfExists(String tableName,
-      Configuration conf) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    Path tableInfoPath = getTableInfoPath(getRootDir(conf), tableName);
-    if (fs.exists(tableInfoPath))
-      deleteDirectory(fs, tableInfoPath);
-  }
-
-  /**
-   * Called when we are creating a table to write out the tables' descriptor.
-   * @param fs
-   * @param hTableDescriptor
-   * @param tableDir
-   * @param forceCreation True if we are to force creation
-   * @throws IOException
-   */
-  private static void writeTableDescriptor(FileSystem fs,
-      HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
-  throws IOException {
-    // Create in tmpdir and then move into place in case we crash after
-    // create but before close.  If we don't successfully close the file,
-    // subsequent region reopens will fail the below because create is
-    // registered in NN.
-    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    Path tmpPath = new Path(new Path(tableDir, ".tmp"),
-      HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
-    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
-    try {
-      writeHTD(fs, tmpPath, hTableDescriptor);
-    } catch (IOException e) {
-      LOG.error("Unable to write the tabledescriptor in the path" + tmpPath
-          + ".", e);
-      fs.delete(tmpPath, true);
-      throw e;
-    }
-    // TODO: The below is less than ideal and likely error prone.  There is a
-    // better rename in hadoops after 0.20 that takes rename options (this has
-    // its own issues according to mighty Todd in that old readers may fail
-    // as we cross the renme transition) but until then, we have this
-    // forceCreation flag which does a delete and then we rename so there is a
-    // hole.  Need to fix.
-    try {
-      if (forceCreation) {
-        if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
-          String errMsg = "Unable to delete " + tableInfoPath
-              + " while forcefully writing the table descriptor.";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-      }
-      if (!fs.rename(tmpPath, tableInfoPath)) {
-        String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
-        LOG.error(errMsg);
-        throw new IOException(errMsg);
-      } else {
-        LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
-      }
-    } finally {
-      fs.delete(tmpPath, true);
-    }
-  }
-
-  /**
-   * Update table descriptor
-   * @param fs
-   * @param rootdir
-   * @param hTableDescriptor
-   * @throws IOException
-   */
-  public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor hTableDescriptor)
-  throws IOException {
-    Path tableInfoPath =
-      getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
-    writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
-    LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
-      hTableDescriptor.toString());
-  }
-
-  private static void writeHTD(final FileSystem fs, final Path p,
-      final HTableDescriptor htd)
-  throws IOException {
-    FSDataOutputStream out = fs.create(p, true);
-    try {
-      htd.write(out);
-      out.write('\n');
-      out.write('\n');
-      out.write(Bytes.toBytes(htd.toString()));
-    } finally {
-      out.close();
-    }
-  }
-
   /**
    * Runs through the HBase rootdir and creates a reverse lookup map for
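Everything removed here survives, reworked, in FSTableDescriptors (the large hunk above); FSUtils keeps only generic filesystem helpers such as getTablePath and the now-public getCurrentFileSystem. For callers the migration is mechanical, as the remaining hunks show -- a before/after sketch:

    // Before this commit:
    //   HTableDescriptor htd = FSUtils.getTableDescriptor(fs, rootdir, tableName);
    //   FSUtils.createTableDescriptor(fs, rootdir, htd);
    // After it:
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, rootdir, tableName);
    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);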
@@ -152,7 +152,7 @@ class HMerge {
       fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
       Bytes.toString(tableName)
     );
-    this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
+    this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
     Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
         HConstants.HREGION_LOGDIR_NAME);
     Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -237,7 +237,7 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta2);
     }
-    HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()),
+    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
     HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);
 
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.io.OutputStream;
 import java.lang.reflect.Field;
 import java.security.MessageDigest;
@@ -62,7 +61,6 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
@@ -1131,20 +1129,6 @@ public class HBaseTestingUtility {
     byte [] firstrow = metaRows.get(0);
     LOG.debug("FirstRow=" + Bytes.toString(firstrow));
     int index = hbaseCluster.getServerWith(firstrow);
-    long start = EnvironmentEdgeManager.currentTimeMillis();
-    int timeout = 3000; // 3sec timeout
-    while (index == -1 &&
-        EnvironmentEdgeManager.currentTimeMillis() - start < timeout) {
-      try {
-        // wait for the region to come online
-        Thread.sleep(50);
-      } catch (InterruptedException ie) {
-        IOException t = new InterruptedIOException();
-        t.initCause(ie);
-        throw t;
-      }
-      index = hbaseCluster.getServerWith(firstrow);
-    }
     return hbaseCluster.getRegionServerThreads().get(index).getRegionServer();
   }
 
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -63,7 +64,8 @@ public class TestDrainingServer {
       HBaseTestingUtility.KEYS);
     // Make a mark for the table in the filesystem.
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    FSUtils.createTableDescriptor(fs, FSUtils.getRootDir(TEST_UTIL.getConfiguration()), htd);
+    FSTableDescriptors.
+      createTableDescriptor(fs, FSUtils.getRootDir(TEST_UTIL.getConfiguration()), htd);
     // Assign out the regions we just created.
     HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
     admin.disableTable(TABLENAME);
@@ -193,4 +195,4 @@ public class TestDrainingServer {
     return TEST_UTIL.getMiniHBaseCluster().countServedRegions() ==
       (COUNT_OF_REGIONS + 2 /*catalog regions*/);
   }
 }
@@ -1,73 +1,72 @@
 /**
  * Copyright 2011 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
 *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.*;
 
 public class TestFSTableDescriptorForceCreation {
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
   @Test
   public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
       throws IOException {
     final String name = "newTable2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HTableDescriptor htd = new HTableDescriptor(name);
 
     assertTrue("Should create new table descriptor",
-        FSUtils.createTableDescriptor(fs, rootdir, htd, false));
+        FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
   }
 
   @Test
   public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
       throws IOException {
     final String name = "testAlreadyExists";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);
-    assertFalse("Should not create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, false));
+    assertFalse("Should not create new table descriptor",
+        FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
   }
 
   @Test
   public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
       throws Exception {
     final String name = "createNewTableNew2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HTableDescriptor htd = new HTableDescriptor(name);
-    FSUtils.createTableDescriptor(fs, rootdir, htd, false);
+    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false);
     assertTrue("Should create new table descriptor",
-        FSUtils.createTableDescriptor(fs, rootdir, htd, true));
+        FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true));
   }
 }
|
@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
|
||||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||||
import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
import org.apache.hadoop.hbase.util.FSUtils;
|
import org.apache.hadoop.hbase.util.FSTableDescriptors;
|
||||||
import org.apache.hadoop.hbase.util.JVMClusterUtil;
|
import org.apache.hadoop.hbase.util.JVMClusterUtil;
|
||||||
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
|
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
|
||||||
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
|
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
|
||||||
|
@@ -357,7 +357,7 @@ public class TestMasterFailover {
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
 
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
     HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -369,7 +369,7 @@ public class TestMasterFailover {
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
     HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
     HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
     List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
@@ -681,7 +681,7 @@ public class TestMasterFailover {
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
         null, null);
     HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -693,7 +693,7 @@ public class TestMasterFailover {
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
     // Write the .tableinfo
-    FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
+    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
     HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
     HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
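All four TestMasterFailover hunks repeat one setup order: the .tableinfo is written to the filesystem before any region of the table is created, presumably so region bootstrap can find its descriptor. A condensed sketch of that order, using only calls visible in this diff; the table name is invented:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class RegionBootstrapSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
        HTableDescriptor htd = new HTableDescriptor("bootstrapExample"); // invented name
        // 1. Write the .tableinfo for the table first...
        FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
        // 2. ...then create a region of that table under the same rootdir.
        HRegionInfo hri = new HRegionInfo(htd.getName(), null, null);
        HRegion.createHRegion(hri, rootdir, conf, htd);
      }
    }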
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Test;
@@ -68,11 +69,11 @@ public class TestHRegionInfo {

     // Delete the temporary table directory that might still be there from the
     // previous test run.
-    FSUtils.deleteTableDescriptorIfExists(tablename,
+    FSTableDescriptors.deleteTableDescriptorIfExists(tablename,
       HTU.getConfiguration());

     HTableDescriptor htd = new HTableDescriptor(tablename);
-    FSUtils.createTableDescriptor(htd, HTU.getConfiguration());
+    FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration());
     HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
       HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
     HTableDescriptor htd2 = hri.getTableDesc();
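This hunk also shows the Configuration-based overloads, which appear to resolve the filesystem and root directory from the configuration instead of taking them as arguments. A sketch of the delete-then-recreate cleanup idiom the test uses, with an invented table name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class DescriptorResetSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String tablename = "exampleTable"; // invented name
        // Remove any descriptor a previous run may have left behind...
        FSTableDescriptors.deleteTableDescriptorIfExists(tablename, conf);
        // ...then write a fresh one under the rootdir resolved from conf.
        FSTableDescriptors.createTableDescriptor(new HTableDescriptor(tablename), conf);
      }
    }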
@@ -21,9 +21,11 @@ import static org.junit.Assert.*;

 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -42,6 +44,87 @@ public class TestFSTableDescriptors {
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static final Log LOG = LogFactory.getLog(TestFSTableDescriptors.class);

+  @Test (expected=IllegalArgumentException.class)
+  public void testRegexAgainstOldStyleTableInfo() {
+    Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME);
+    int i = FSTableDescriptors.getTableInfoSequenceid(p);
+    assertEquals(0, i);
+    // Assert it won't eat garbage -- that it fails
+    p = new Path("/tmp", "abc");
+    FSTableDescriptors.getTableInfoSequenceid(p);
+  }
+
+  @Test
+  public void testCreateAndUpdate() throws IOException {
+    Path testdir = UTIL.getDataTestDir();
+    HTableDescriptor htd = new HTableDescriptor("testCreate");
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
+    assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
+    FileStatus [] statuses = fs.listStatus(testdir);
+    assertTrue(statuses.length == 1);
+    for (int i = 0; i < 10; i++) {
+      FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    }
+    statuses = fs.listStatus(testdir);
+    assertTrue(statuses.length == 1);
+    Path tmpTableDir = new Path(FSUtils.getTablePath(testdir, htd.getName()), ".tmp");
+    statuses = fs.listStatus(tmpTableDir);
+    assertTrue(statuses.length == 0);
+  }
+
+  @Test
+  public void testSequenceidAdvancesOnTableInfo() throws IOException {
+    Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
+    HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
+    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
+    Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p0));
+    int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
+    assertTrue(i1 == i0 + 1);
+    Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p1));
+    int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
+    assertTrue(i2 == i1 + 1);
+  }
+
+  @Test
+  public void testFormatTableInfoSequenceId() {
+    Path p0 = assertWriteAndReadSequenceid(0);
+    // Assert p0 has format we expect.
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
+      sb.append("0");
+    }
+    assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(),
+      p0.getName());
+    // Check a few more.
+    Path p2 = assertWriteAndReadSequenceid(2);
+    Path p10000 = assertWriteAndReadSequenceid(10000);
+    // Get a .tableinfo that has no sequenceid suffix.
+    Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
+    FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
+    FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
+    FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
+    FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
+    FSTableDescriptors.FileStatusFileNameComparator comparator =
+      new FSTableDescriptors.FileStatusFileNameComparator();
+    assertTrue(comparator.compare(fs, fs0) > 0);
+    assertTrue(comparator.compare(fs0, fs2) > 0);
+    assertTrue(comparator.compare(fs2, fs10000) > 0);
+  }
+
+  private Path assertWriteAndReadSequenceid(final int i) {
+    Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i);
+    int ii = FSTableDescriptors.getTableInfoSequenceid(p);
+    assertEquals(i, ii);
+    return p;
+  }
+
   @Test
   public void testRemoves() throws IOException {
     final String name = "testRemoves";
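The new tests above pin down the naming scheme that replaces the old remove-then-rename update: each write of the descriptor lands as TABLEINFO_NAME plus a dot and a zero-padded, monotonically increasing sequence id, and updateHTableDescriptor bumps the id by one and removes the predecessor only after the new file exists. A standalone sketch of the format/parse round trip; the padding width of 10 is an assumption standing in for FSTableDescriptors.WIDTH_OF_SEQUENCE_ID, and the class and helper names are invented:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TableInfoNameSketch {
      static final String TABLEINFO_NAME = ".tableinfo";
      static final int WIDTH = 10; // assumed stand-in for WIDTH_OF_SEQUENCE_ID

      // Matches ".tableinfo.<WIDTH digits>", e.g. ".tableinfo.0000010000".
      static final Pattern SUFFIX =
        Pattern.compile(Pattern.quote(TABLEINFO_NAME) + "\\.([0-9]{" + WIDTH + "})");

      static String format(int sequenceid) {
        return String.format("%s.%0" + WIDTH + "d", TABLEINFO_NAME, sequenceid);
      }

      static int parse(String fileName) {
        // A bare ".tableinfo" is an old-style file; treat it as sequence id 0,
        // matching what testRegexAgainstOldStyleTableInfo asserts.
        if (fileName.equals(TABLEINFO_NAME)) return 0;
        Matcher m = SUFFIX.matcher(fileName);
        // Anything else is garbage and is rejected, as the same test expects.
        if (!m.matches()) throw new IllegalArgumentException(fileName);
        return Integer.parseInt(m.group(1));
      }

      public static void main(String[] args) {
        String name = format(10000);               // ".tableinfo.0000010000"
        System.out.println(name + " parses to " + parse(name));
        System.out.println(parse(TABLEINFO_NAME)); // 0
      }
    }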
@@ -62,14 +145,14 @@ public class TestFSTableDescriptors {
     Path rootdir = UTIL.getDataTestDir(name);
     createHTDInFS(fs, rootdir, htd);
     HTableDescriptor htd2 =
-      FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
+      FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
     assertTrue(htd.equals(htd2));
   }

   private void createHTDInFS(final FileSystem fs, Path rootdir,
       final HTableDescriptor htd)
   throws IOException {
-    FSUtils.createTableDescriptor(fs, rootdir, htd);
+    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
   }

   @Test public void testHTableDescriptors()
@@ -102,7 +185,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(name + i);
       htd.addFamily(new HColumnDescriptor("" + i));
-      FSUtils.updateHTableDescriptor(fs, rootdir, htd);
+      FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -121,7 +204,7 @@ public class TestFSTableDescriptors {
       htds.cachehits >= ((count * 2) + 1));
   }

-  @Test (expected=java.io.FileNotFoundException.class)
+  @Test (expected=org.apache.hadoop.hbase.TableExistsException.class)
   public void testNoSuchTable() throws IOException {
     final String name = "testNoSuchTable";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -143,4 +226,30 @@ public class TestFSTableDescriptors {
     htds.add(htd);
     htds.add(htd);
   }
-}
+
+  @Test
+  public void testTableInfoFileStatusComparator() {
+    FileStatus bare =
+      new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
+    FileStatus future =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp/tablinfo." + System.currentTimeMillis()));
+    FileStatus farFuture =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp/tablinfo." + System.currentTimeMillis() + 1000));
+    FileStatus [] alist = {bare, future, farFuture};
+    FileStatus [] blist = {bare, farFuture, future};
+    FileStatus [] clist = {farFuture, bare, future};
+    FSTableDescriptors.FileStatusFileNameComparator c =
+      new FSTableDescriptors.FileStatusFileNameComparator();
+    Arrays.sort(alist, c);
+    Arrays.sort(blist, c);
+    Arrays.sort(clist, c);
+    // Now assert all sorted same in way we want.
+    for (int i = 0; i < alist.length; i++) {
+      assertTrue(alist[i].equals(blist[i]));
+      assertTrue(blist[i].equals(clist[i]));
+      assertTrue(clist[i].equals(i == 0? farFuture: i == 1? future: bare));
+    }
+  }
+}
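The comparator test closes the loop on the naming scheme: because the sequence id is zero-padded to a fixed width, plain reverse lexicographic comparison of file names agrees with numeric ordering of the ids, so after sorting the first element is always the newest descriptor. A minimal illustration of that property with invented file names (the real ordering lives in FSTableDescriptors.FileStatusFileNameComparator):

    import java.util.Arrays;
    import java.util.Collections;

    public class NewestFirstSketch {
      public static void main(String[] args) {
        // Fixed-width zero padding keeps name order and numeric order aligned.
        String[] names = {
          ".tableinfo.0000000001",
          ".tableinfo.0000000003",
          ".tableinfo.0000000002",
        };
        Arrays.sort(names, Collections.reverseOrder());
        // The live descriptor, highest sequence id, sorts to the front.
        System.out.println(names[0]); // .tableinfo.0000000003
      }
    }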
@@ -35,7 +35,6 @@ import org.junit.Test;
  * Test {@link FSUtils}.
  */
 public class TestFSUtils {

   @Test public void testIsHDFS() throws Exception {
     HBaseTestingUtility htu = new HBaseTestingUtility();
     htu.getConfiguration().setBoolean("dfs.support.append", false);
@@ -96,7 +96,7 @@ public class TestMergeTable {

     // Create regions and populate them at same time.  Create the tabledir
     // for them first.
-    FSUtils.createTableDescriptor(fs, rootdir, desc);
+    FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
@@ -136,7 +136,7 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create root and meta regions
       createRootAndMetaRegions();
-      FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc);
+      FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc);
      /*
       * Create the regions we will merge
       */