HBASE-8778 Region assignments scan table directory making them slow for huge tables

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1510977 13f79535-47bb-0310-9956-ffa450edef68
commit 10a25c52cf (parent f06af44932)
Michael Stack, 2013-08-06 14:43:09 +00:00
23 changed files with 597 additions and 391 deletions
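In brief: the per-table descriptor file (the .tableinfo.<sequenceid> file) moves from the top level of each table directory into a dedicated .tabledesc subdirectory, so code looking for the current descriptor no longer has to list a table directory that may also hold thousands of region subdirectories. An illustrative sketch of the layout change (the table name "t1" and the sequence id are made-up examples; the .tabledesc and .tableinfo names come from the diff below):

  before:  /hbase/t1/.tableinfo.0000000003
  after:   /hbase/t1/.tabledesc/.tableinfo.0000000003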

View File

@@ -452,10 +452,14 @@ public class MasterFileSystem {
     // Make sure the meta region directory exists!
     if (!FSUtils.metaRegionExists(fs, rd)) {
       bootstrap(rd, c);
+    } else {
+      // Migrate table descriptor files if necessary
+      org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
+        .migrateFSTableDescriptorsIfNecessary(fs, rd);
     }
     // Create tableinfo-s for META if not already there.
-    FSTableDescriptors.createTableDescriptor(fs, rd, HTableDescriptor.META_TABLEDESC, false);
+    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);
     return rd;
   }
@@ -491,7 +495,7 @@ public class MasterFileSystem {
     LOG.info("BOOTSTRAP: creating META region");
     try {
       // Bootstrapping, make sure blockcache is off. Else, one will be
-      // created here in bootstap and it'll need to be cleaned up. Better to
+      // created here in bootstrap and it'll need to be cleaned up. Better to
       // not make it in first place. Turn off block caching for bootstrap.
       // Enable after.
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
@@ -589,16 +593,6 @@ public class MasterFileSystem {
     }
   }
-  /**
-   * Create new HTableDescriptor in HDFS.
-   *
-   * @param htableDescriptor
-   */
-  public void createTableDescriptor(HTableDescriptor htableDescriptor)
-  throws IOException {
-    FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
-  }
   /**
    * Delete column of a table
    * @param tableName

View File

@@ -202,8 +202,9 @@ public class CreateTableHandler extends EventHandler {
     FileSystem fs = fileSystemManager.getFileSystem();
     // 1. Create Table Descriptor
-    FSTableDescriptors.createTableDescriptor(fs, tempdir, this.hTableDescriptor);
     Path tempTableDir = new Path(tempdir, tableName);
+    new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
+      tempTableDir, this.hTableDescriptor, false);
     Path tableDir = new Path(fileSystemManager.getRootDir(), tableName);
     // 2. Create Regions

View File

@@ -131,7 +131,7 @@ public final class MasterSnapshotVerifier {
    * @param snapshotDir snapshot directory to check
    */
   private void verifyTableInfo(Path snapshotDir) throws IOException {
-    FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
+    FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
   }
   /**

View File

@@ -667,7 +667,8 @@ public class SnapshotManager implements Stoppable {
     // read snapshot information
     SnapshotDescription fsSnapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
-    HTableDescriptor snapshotTableDesc = FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
+    HTableDescriptor snapshotTableDesc =
+        FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
     String tableName = reqSnapshot.getTable();
     // stop tracking "abandoned" handlers

View File

@@ -51,7 +51,6 @@ import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -113,12 +112,12 @@ public class CompactionTool extends Configured implements Tool {
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
         compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
+        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         compactRegion(tableDir, htd, path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
@@ -130,7 +129,7 @@ public class CompactionTool extends Configured implements Tool {
   private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
       throws IOException {
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
+    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
     for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
       compactRegion(tableDir, htd, regionDir, compactOnce, major);
     }

View File

@@ -317,7 +317,7 @@ public final class SnapshotInfo extends Configured implements Tool {
       }
       snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
-      snapshotTableDesc = FSTableDescriptors.getTableDescriptor(fs, snapshotDir);
+      snapshotTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
       return true;
     }

View File

@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 /**
@@ -61,12 +60,14 @@ public class TableInfoCopyTask extends SnapshotTask {
     LOG.debug("Attempting to copy table info for snapshot:"
         + ClientSnapshotDescriptionUtils.toString(this.snapshot));
     // get the HTable descriptor
-    HTableDescriptor orig = FSTableDescriptors.getTableDescriptor(fs, rootDir,
-      Bytes.toBytes(this.snapshot.getTable()));
+    HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
+      this.snapshot.getTable());
     this.rethrowException();
     // write a copy of descriptor to the snapshot directory
     Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
-    FSTableDescriptors.createTableDescriptorForTableDirectory(fs, snapshotDir, orig, false);
+    new FSTableDescriptors(fs, rootDir)
+      .createTableDescriptorForTableDirectory(snapshotDir, orig, false);
     LOG.debug("Finished copying tableinfo.");
     return null;
   }

View File

@@ -0,0 +1,137 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
/**
* A class to migrate table descriptor files to a dedicated subdir.
* Invoked by HMaster.finishInitialization before accessing table descriptors.
* Migrates snapshots, user tables, and system tables.
*
* @deprecated will be removed for the major release after 0.96.
*/
@Deprecated
public class FSTableDescriptorMigrationToSubdir {
private static final Log LOG = LogFactory.getLog(FSTableDescriptorMigrationToSubdir.class);
public static void migrateFSTableDescriptorsIfNecessary(FileSystem fs, Path rootDir)
throws IOException {
if (needsMigration(fs, rootDir)) {
migrateFsTableDescriptors(fs, rootDir);
LOG.info("Migration complete.");
}
}
/**
* Determines if migration is required by checking to see whether the META table has been
* migrated.
*/
private static boolean needsMigration(FileSystem fs, Path rootDir) throws IOException {
Path metaTableDir = FSTableDescriptors.getTableDirectory(rootDir,
Bytes.toString(HConstants.META_TABLE_NAME));
FileStatus metaTableInfoStatus =
FSTableDescriptors.getTableInfoPath(fs, metaTableDir);
return metaTableInfoStatus == null;
}
/**
* Migrates all snapshots, user tables and system tables that require migration.
* First migrates snapshots.
* Then migrates each user table in order,
* then attempts ROOT (should be gone)
* Migrates META last to indicate migration is complete.
*/
private static void migrateFsTableDescriptors(FileSystem fs, Path rootDir) throws IOException {
// First migrate snapshots - will migrate any snapshot dir that contains a table info file
Path snapshotsDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
if (fs.exists(snapshotsDir)) {
LOG.info("Migrating snapshots");
FileStatus[] snapshots = fs.listStatus(snapshotsDir,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
for (FileStatus snapshot : snapshots) {
migrateTable(fs, snapshot.getPath());
}
}
LOG.info("Migrating user tables");
List<Path> userTableDirs = FSUtils.getTableDirs(fs, rootDir);
for (Path userTableDir : userTableDirs) {
migrateTable(fs, userTableDir);
}
LOG.info("Migrating system tables");
migrateTableIfExists(fs, rootDir, HConstants.ROOT_TABLE_NAME);
// migrate meta last because that's what we check to see if migration is complete
migrateTableIfExists(fs, rootDir, HConstants.META_TABLE_NAME);
}
private static void migrateTableIfExists(FileSystem fs, Path rootDir, byte[] tableName)
throws IOException {
Path tableDir = FSTableDescriptors.getTableDirectory(rootDir, Bytes.toString(tableName));
if (fs.exists(tableDir)) {
migrateTable(fs, tableDir);
}
}
/**
* Migrates table info files.
* Moves the latest table info file (if present) from the table dir to the table info subdir.
* Removes any older table info files from the table dir and any existing table info subdir.
*/
private static void migrateTable(FileSystem fs, Path tableDir) throws IOException {
FileStatus oldTableStatus = FSTableDescriptors.getCurrentTableInfoStatus(fs, tableDir, true);
if (oldTableStatus == null) {
LOG.debug("No table info file to migrate for " + tableDir);
return;
}
Path tableInfoDir = new Path(tableDir, FSTableDescriptors.TABLEINFO_DIR);
// remove table info subdir if it already exists
boolean removedExistingSubdir = FSUtils.deleteDirectory(fs, tableInfoDir);
if (removedExistingSubdir) {
LOG.info("Removed existing subdir at: " + tableInfoDir);
}
boolean createdSubdir = fs.mkdirs(tableInfoDir);
if (!createdSubdir) {
throw new IOException("Unable to create new table info directory: " + tableInfoDir);
}
Path oldTableInfoPath = oldTableStatus.getPath();
Path newTableInfoPath = new Path(tableInfoDir, oldTableInfoPath.getName());
boolean renamedInfoFile = fs.rename(oldTableInfoPath, newTableInfoPath);
if (!renamedInfoFile) {
throw new IOException("Failed to move table info file from old location: "
+ oldTableInfoPath + " to new location: " + newTableInfoPath);
}
LOG.info("Migrated table info from: " + oldTableInfoPath
+ " to new location: " + newTableInfoPath);
}
}
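A minimal usage sketch for the migration helper above; fs and rootDir are assumed to point at the filesystem and root directory of an existing HBase install (this mirrors the MasterFileSystem hunk at the top of the commit). The call is idempotent: needsMigration() keys off whether META's table info file has already been moved, and META is deliberately migrated last.

  // Safe to invoke on every master startup; a no-op once META has been migrated.
  FSTableDescriptorMigrationToSubdir.migrateFSTableDescriptorsIfNecessary(fs, rootDir);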

View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
@@ -39,25 +38,27 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Ints;
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
- * passed filesystem. It expects descriptors to be in a file under the
- * table's directory in FS. Can be read-only -- i.e. does not modify
- * the filesystem or can be read and write.
+ * passed filesystem. It expects descriptors to be in a file in the
+ * {@link #TABLEINFO_DIR} subdir of the table's directory in FS. Can be read-only
+ * -- i.e. does not modify the filesystem or can be read and write.
  *
  * <p>Also has utility for keeping up the table descriptors tableinfo file.
- * The table schema file is kept under the table directory in the filesystem.
- * It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the
+ * The table schema file is kept in the {@link #TABLEINFO_DIR} subdir
+ * of the table directory in the filesystem.
+ * It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the
  * edit sequenceid: e.g. <code>.tableinfo.0000000003</code>. This sequenceid
  * is always increasing. It starts at zero. The table schema file with the
  * highest sequenceid has the most recent schema edit. Usually there is one file
@@ -72,27 +73,29 @@ public class FSTableDescriptors implements TableDescriptors {
   private final FileSystem fs;
   private final Path rootdir;
   private final boolean fsreadonly;
-  long cachehits = 0;
-  long invocations = 0;
-  /** The file name used to store HTD in HDFS */
-  public static final String TABLEINFO_NAME = ".tableinfo";
+  @VisibleForTesting long cachehits = 0;
+  @VisibleForTesting long invocations = 0;
+  /** The file name prefix used to store HTD in HDFS */
+  static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
+  static final String TABLEINFO_DIR = ".tabledesc";
+  static final String TMP_DIR = ".tmp";
   // This cache does not age out the old stuff. Thinking is that the amount
   // of data we keep up in here is so small, no need to do occasional purge.
   // TODO.
-  private final Map<String, TableDescriptorModtime> cache =
-    new ConcurrentHashMap<String, TableDescriptorModtime>();
+  private final Map<String, TableDescriptorAndModtime> cache =
+    new ConcurrentHashMap<String, TableDescriptorAndModtime>();
   /**
    * Data structure to hold modification time and table descriptor.
    */
-  static class TableDescriptorModtime {
-    private final HTableDescriptor descriptor;
+  private static class TableDescriptorAndModtime {
+    private final HTableDescriptor htd;
     private final long modtime;
-    TableDescriptorModtime(final long modtime, final HTableDescriptor htd) {
-      this.descriptor = htd;
+    TableDescriptorAndModtime(final long modtime, final HTableDescriptor htd) {
+      this.htd = htd;
       this.modtime = modtime;
     }
@@ -101,30 +104,40 @@ public class FSTableDescriptors implements TableDescriptors {
     }
     HTableDescriptor getTableDescriptor() {
-      return this.descriptor;
+      return this.htd;
     }
   }
+  /**
+   * Construct a FSTableDescriptors instance using the hbase root dir of the given
+   * conf and the filesystem where that root dir lives.
+   * This instance can do write operations (is not read only).
+   */
+  public FSTableDescriptors(final Configuration conf) throws IOException {
+    this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
+  }
   public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
     this(fs, rootdir, false);
   }
   /**
-   * @param fs
-   * @param rootdir
-   * @param fsreadOnly True if we are read-only when it comes to filesystem
+   * @param fsreadonly True if we are read-only when it comes to filesystem
    *        operations; i.e. on remove, we do not do delete in fs.
    */
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir,
-      final boolean fsreadOnly) {
+  public FSTableDescriptors(final FileSystem fs,
+      final Path rootdir, final boolean fsreadonly) {
     super();
     this.fs = fs;
     this.rootdir = rootdir;
-    this.fsreadonly = fsreadOnly;
+    this.fsreadonly = fsreadonly;
   }
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getHTableDescriptor(java.lang.String)
+  /**
+   * Get the current table descriptor for the given table, or null if none exists.
+   *
+   * Uses a local cache of the descriptor but still checks the filesystem on each call
+   * to see if a newer file has been created since the cached one was read.
    */
   @Override
   public HTableDescriptor get(final byte [] tablename)
@@ -132,8 +145,11 @@ public class FSTableDescriptors implements TableDescriptors {
     return get(Bytes.toString(tablename));
   }
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptor(byte[])
+  /**
+   * Get the current table descriptor for the given table, or null if none exists.
+   *
+   * Uses a local cache of the descriptor but still checks the filesystem on each call
+   * to see if a newer file has been created since the cached one was read.
    */
   @Override
   public HTableDescriptor get(final String tablename)
@@ -150,23 +166,23 @@ public class FSTableDescriptors implements TableDescriptors {
     // .META. and -ROOT- is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) {
-      throw new IOException("No descriptor found for table = " + tablename);
+      throw new IOException("No descriptor found for non table = " + tablename);
     }
     // Look in cache of descriptors.
-    TableDescriptorModtime cachedtdm = this.cache.get(tablename);
+    TableDescriptorAndModtime cachedtdm = this.cache.get(tablename);
     if (cachedtdm != null) {
       // Check mod time has not changed (this is trip to NN).
-      if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) {
+      if (getTableInfoModtime(tablename) <= cachedtdm.getModtime()) {
         cachehits++;
         return cachedtdm.getTableDescriptor();
       }
     }
-    TableDescriptorModtime tdmt = null;
+    TableDescriptorAndModtime tdmt = null;
     try {
-      tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename);
+      tdmt = getTableDescriptorAndModtime(tablename);
     } catch (NullPointerException e) {
       LOG.debug("Exception during readTableDecriptor. Current table name = "
           + tablename, e);
@@ -185,8 +201,8 @@ public class FSTableDescriptors implements TableDescriptors {
     return tdmt == null ? null : tdmt.getTableDescriptor();
   }
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
+  /**
+   * Returns a map from table name to table descriptor for all tables.
    */
   @Override
   public Map<String, HTableDescriptor> getAll()
@@ -208,8 +224,15 @@ public class FSTableDescriptors implements TableDescriptors {
     return htds;
   }
+  /**
+   * Adds (or updates) the table descriptor to the FileSystem
+   * and updates the local cache with it.
+   */
   @Override
   public void add(HTableDescriptor htd) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
+    }
     if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) {
       throw new NotImplementedException();
     }
@@ -217,108 +240,179 @@ public class FSTableDescriptors implements TableDescriptors {
       throw new NotImplementedException();
     }
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
-      throw new NotImplementedException();
+      throw new NotImplementedException(
+        "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
     }
-    if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
-    long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
-    this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
+    updateTableDescriptor(htd);
+    long modtime = getTableInfoModtime(htd.getNameAsString());
+    this.cache.put(htd.getNameAsString(), new TableDescriptorAndModtime(modtime, htd));
   }
+  /**
+   * Removes the table descriptor from the local cache and returns it.
+   * If not in read only mode, it also deletes the entire table directory(!)
+   * from the FileSystem.
+   */
   @Override
   public HTableDescriptor remove(final String tablename)
   throws IOException {
-    if (!this.fsreadonly) {
-      Path tabledir = FSUtils.getTablePath(this.rootdir, tablename);
-      if (this.fs.exists(tabledir)) {
-        if (!this.fs.delete(tabledir, true)) {
-          throw new IOException("Failed delete of " + tabledir.toString());
-        }
-      }
-    }
-    TableDescriptorModtime tdm = this.cache.remove(tablename);
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
+    }
+    Path tabledir = getTableDirectory(tablename);
+    if (this.fs.exists(tabledir)) {
+      if (!this.fs.delete(tabledir, true)) {
+        throw new IOException("Failed delete of " + tabledir.toString());
+      }
+    }
+    TableDescriptorAndModtime tdm = this.cache.remove(tablename);
     return tdm == null ? null : tdm.getTableDescriptor();
   }
   /**
-   * Checks if <code>.tableinfo<code> exists for given table
+   * Checks if a current table info file exists for the given table
    *
-   * @param fs file system
-   * @param rootdir root directory of HBase installation
    * @param tableName name of table
    * @return true if exists
    * @throws IOException
    */
-  public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
-      String tableName) throws IOException {
-    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
-    return status == null? false: fs.exists(status.getPath());
-  }
-  private static FileStatus getTableInfoPath(final FileSystem fs,
-      final Path rootdir, final String tableName)
-  throws IOException {
-    Path tabledir = FSUtils.getTablePath(rootdir, tableName);
-    return getTableInfoPath(fs, tabledir);
+  public boolean isTableInfoExists(String tableName) throws IOException {
+    return getTableInfoPath(tableName) != null;
   }
   /**
-   * Looks under the table directory in the filesystem for files with a
-   * {@link #TABLEINFO_NAME} prefix. Returns reference to the 'latest' instance.
-   * @param fs
-   * @param tabledir
-   * @return The 'current' tableinfo file.
+   * Find the most current table info file for the given table in the hbase root directory.
+   * @return The file status of the current table info file or null if it does not exist
+   */
+  private FileStatus getTableInfoPath(final String tableName) throws IOException {
+    Path tableDir = getTableDirectory(tableName);
+    return getTableInfoPath(tableDir);
+  }
+  private FileStatus getTableInfoPath(Path tableDir)
+  throws IOException {
+    return getTableInfoPath(fs, tableDir, !fsreadonly);
+  }
+  /**
+   * Find the most current table info file for the table located in the given table directory.
+   *
+   * Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
+   * files and takes the 'current' one - meaning the one with the highest sequence number if present
+   * or no sequence number at all if none exist (for backward compatibility from before there
+   * were sequence numbers).
+   *
+   * @return The file status of the current table info file or null if it does not exist
    * @throws IOException
    */
-  public static FileStatus getTableInfoPath(final FileSystem fs,
-      final Path tabledir)
+  public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
   throws IOException {
-    FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() {
-      @Override
-      public boolean accept(Path p) {
-        // Accept any file that starts with TABLEINFO_NAME
-        return p.getName().startsWith(TABLEINFO_NAME);
-      }
-    });
+    return getTableInfoPath(fs, tableDir, false);
+  }
+  /**
+   * Find the most current table info file for the table in the given table directory.
+   *
+   * Looks within the {@link #TABLEINFO_DIR} subdirectory of the given directory for any table info
+   * files and takes the 'current' one - meaning the one with the highest sequence number if
+   * present or no sequence number at all if none exist (for backward compatibility from before
+   * there were sequence numbers).
+   * If there are multiple table info files found and removeOldFiles is true it also deletes the
+   * older files.
+   *
+   * @return The file status of the current table info file or null if none exist
+   * @throws IOException
+   */
+  private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
+  throws IOException {
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
+    return getCurrentTableInfoStatus(fs, tableInfoDir, removeOldFiles);
+  }
+  /**
+   * Find the most current table info file in the given directory
+   *
+   * Looks within the given directory for any table info files
+   * and takes the 'current' one - meaning the one with the highest sequence number if present
+   * or no sequence number at all if none exist (for backward compatibility from before there
+   * were sequence numbers).
+   * If there are multiple possible files found
+   * and the we're not in read only mode it also deletes the older files.
+   *
+   * @return The file status of the current table info file or null if it does not exist
+   * @throws IOException
+   */
+  // only visible for FSTableDescriptorMigrationToSubdir, can be removed with that
+  static FileStatus getCurrentTableInfoStatus(FileSystem fs, Path dir, boolean removeOldFiles)
+  throws IOException {
+    FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
     if (status == null || status.length < 1) return null;
-    Arrays.sort(status, new FileStatusFileNameComparator());
-    if (status.length > 1) {
-      // Clean away old versions of .tableinfo
-      for (int i = 1; i < status.length; i++) {
-        Path p = status[i].getPath();
-        // Clean up old versions
-        if (!fs.delete(p, false)) {
-          LOG.warn("Failed cleanup of " + p);
-        } else {
-          LOG.debug("Cleaned up old tableinfo file " + p);
-        }
-      }
-    }
-    return status[0];
+    FileStatus mostCurrent = null;
+    for (FileStatus file : status) {
+      if (mostCurrent == null || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) {
+        mostCurrent = file;
+      }
+    }
+    if (removeOldFiles && status.length > 1) {
+      // Clean away old versions
+      for (FileStatus file : status) {
+        Path path = file.getPath();
+        if (file != mostCurrent) {
+          if (!fs.delete(file.getPath(), false)) {
+            LOG.warn("Failed cleanup of " + path);
+          } else {
+            LOG.debug("Cleaned up old tableinfo file " + path);
+          }
+        }
+      }
+    }
+    return mostCurrent;
   }
   /**
-   * Compare {@link FileStatus} instances by {@link Path#getName()}.
-   * Returns in reverse order.
+   * Compare {@link FileStatus} instances by {@link Path#getName()}. Returns in
+   * reverse order.
    */
-  static class FileStatusFileNameComparator
-  implements Comparator<FileStatus> {
+  @VisibleForTesting
+  static final Comparator<FileStatus> TABLEINFO_FILESTATUS_COMPARATOR =
+  new Comparator<FileStatus>() {
     @Override
     public int compare(FileStatus left, FileStatus right) {
-      return -left.compareTo(right);
-    }
-  }
+      return -left.compareTo(right);
+    }};
+  /**
+   * Return the table directory in HDFS
+   */
+  @VisibleForTesting Path getTableDirectory(final String tableName) {
+    return getTableDirectory(rootdir, tableName);
+  }
+  /**
+   * Return the table directory in HDFS
+   */
+  static Path getTableDirectory(Path rootDir, String tableName) {
+    return FSUtils.getTablePath(rootDir, tableName);
+  }
+  private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() {
+    @Override
+    public boolean accept(Path p) {
+      // Accept any file that starts with TABLEINFO_NAME
+      return p.getName().startsWith(TABLEINFO_FILE_PREFIX);
+    }};
   /**
    * Width of the sequenceid that is a suffix on a tableinfo file.
    */
-  static final int WIDTH_OF_SEQUENCE_ID = 10;
+  @VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10;
   /*
    * @param number Number to use as suffix.
-   * @return Returns zero-prefixed 5-byte wide decimal version of passed
+   * @return Returns zero-prefixed decimal version of passed
    * number (Does absolute in case number is negative).
    */
-  static String formatTableInfoSequenceId(final int number) {
+  private static String formatTableInfoSequenceId(final int number) {
     byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
     int d = Math.abs(number);
     for (int i = b.length - 1; i >= 0; i--) {
@@ -333,17 +427,16 @@ public class FSTableDescriptors implements TableDescriptors {
    * Use regex because may encounter oldstyle .tableinfos where there is no
    * sequenceid on the end.
    */
-  private static final Pattern SUFFIX =
-    Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
+  private static final Pattern TABLEINFO_FILE_REGEX =
+    Pattern.compile(TABLEINFO_FILE_PREFIX + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
   /**
    * @param p Path to a <code>.tableinfo</code> file.
    * @return The current editid or 0 if none found.
    */
-  static int getTableInfoSequenceid(final Path p) {
+  @VisibleForTesting static int getTableInfoSequenceId(final Path p) {
     if (p == null) return 0;
-    Matcher m = SUFFIX.matcher(p.getName());
+    Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
     if (!m.matches()) throw new IllegalArgumentException(p.toString());
     String suffix = m.group(2);
     if (suffix == null || suffix.length() <= 0) return 0;
@@ -355,73 +448,70 @@ public class FSTableDescriptors implements TableDescriptors {
    * @param sequenceid
    * @return Name of tableinfo file.
    */
-  static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
-    return new Path(tabledir,
-      TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
+  @VisibleForTesting static String getTableInfoFileName(final int sequenceid) {
+    return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid);
   }
   /**
-   * @param fs
-   * @param rootdir
-   * @param tableName
-   * @return Modification time for the table {@link #TABLEINFO_NAME} file
+   * @return Modification time for the table {@link #TABLEINFO_FILE_PREFIX} file
    * or <code>0</code> if no tableinfo file found.
    * @throws IOException
    */
-  static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
-      final String tableName)
-  throws IOException {
-    FileStatus status = getTableInfoPath(fs, rootdir, tableName);
-    return status == null? 0: status.getModificationTime();
+  private long getTableInfoModtime(final String tableName) throws IOException {
+    FileStatus status = getTableInfoPath(tableName);
+    return status == null ? 0 : status.getModificationTime();
   }
   /**
-   * Get HTD from HDFS.
-   * @param fs
-   * @param hbaseRootDir
-   * @param tableName
-   * @return Descriptor or null if none found.
-   * @throws IOException
+   * Returns the latest table descriptor for the given table directly from the file system
+   * if it exists, bypassing the local cache.
+   * Returns null if it's not found.
    */
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, byte[] tableName)
-  throws IOException {
-    HTableDescriptor htd = null;
-    try {
-      TableDescriptorModtime tdmt =
-        getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName));
-      htd = tdmt == null ? null : tdmt.getTableDescriptor();
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = "
-          + Bytes.toString(tableName), e);
-    }
-    return htd;
-  }
-  static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, String tableName) throws NullPointerException, IOException {
-    TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName);
-    return tdmt == null ? null : tdmt.getTableDescriptor();
-  }
-  static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs,
-      Path hbaseRootDir, String tableName) throws NullPointerException, IOException{
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
+      Path hbaseRootDir, String tableName) throws IOException {
+    Path tableDir = getTableDirectory(hbaseRootDir, tableName);
+    return getTableDescriptorFromFs(fs, tableDir);
+  }
+  /**
+   * Returns the latest table descriptor for the table located at the given directory
+   * directly from the file system if it exists.
+   * @throws TableInfoMissingException if there is no descriptor
+   */
+  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+  throws IOException {
+    FileStatus status = getTableInfoPath(fs, tableDir, false);
+    if (status == null) {
+      throw new TableInfoMissingException("No table descriptor file under " + tableDir);
+    }
+    return readTableDescriptor(fs, status, false);
+  }
+  private TableDescriptorAndModtime getTableDescriptorAndModtime(String tableName)
+  throws IOException {
     // ignore both -ROOT- and .META. tables
     if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0
         || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) {
       return null;
     }
-    return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
+    return getTableDescriptorAndModtime(getTableDirectory(tableName));
   }
-  static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir)
-  throws NullPointerException, IOException {
-    if (tableDir == null) throw new NullPointerException();
-    FileStatus status = getTableInfoPath(fs, tableDir);
+  private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir)
+  throws IOException {
+    FileStatus status = getTableInfoPath(tableDir);
     if (status == null) {
-      throw new TableInfoMissingException("No .tableinfo file under "
-        + tableDir.toUri());
+      throw new TableInfoMissingException("No table descriptor file under " + tableDir);
     }
+    HTableDescriptor htd = readTableDescriptor(fs, status, !fsreadonly);
+    return new TableDescriptorAndModtime(status.getModificationTime(), htd);
+  }
+  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
+      boolean rewritePb) throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
     FSDataInputStream fsDataInputStream = fs.open(status.getPath());
@@ -436,108 +526,131 @@ public class FSTableDescriptors implements TableDescriptors {
     } catch (DeserializationException e) {
       throw new IOException("content=" + Bytes.toShort(content), e);
     }
-    if (!ProtobufUtil.isPBMagicPrefix(content)) {
+    if (rewritePb && !ProtobufUtil.isPBMagicPrefix(content)) {
       // Convert the file over to be pb before leaving here.
-      createTableDescriptor(fs, tableDir.getParent(), htd, true);
+      Path tableInfoDir = status.getPath().getParent();
+      Path tableDir = tableInfoDir.getParent();
+      writeTableDescriptor(fs, htd, tableDir, status);
     }
-    return new TableDescriptorModtime(status.getModificationTime(), htd);
-  }
-  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
-  throws IOException, NullPointerException {
-    TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir);
-    return tdmt == null ? null : tdmt.getTableDescriptor();
+    return htd;
   }
   /**
-   * Update table descriptor
-   * @param fs
-   * @param conf
-   * @param hTableDescriptor
-   * @return New tableinfo or null if we failed update.
+   * Update table descriptor on the file system
    * @throws IOException Thrown if failed update.
+   * @throws NotImplementedException if in read only mode
    */
-  static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor hTableDescriptor)
+  @VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd)
   throws IOException {
-    Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
-    Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir,
-      getTableInfoPath(fs, tableDir));
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
+    }
+    Path tableDir = getTableDirectory(htd.getNameAsString());
+    Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
     return p;
   }
   /**
-   * Deletes a table's directory from the file system if exists. Used in unit
-   * tests.
+   * Deletes all the table descriptor files from the file system.
+   * Used in unit tests only.
+   * @throws NotImplementedException if in read only mode
    */
-  public static void deleteTableDescriptorIfExists(String tableName,
-      Configuration conf) throws IOException {
-    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-    FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
-    // The below deleteDirectory works for either file or directory.
-    if (status != null && fs.exists(status.getPath())) {
-      FSUtils.deleteDirectory(fs, status.getPath());
-    }
+  public void deleteTableDescriptorIfExists(String tableName) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
+    }
+    Path tableDir = getTableDirectory(tableName);
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
+    deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
+  }
+  /**
+   * Deletes files matching the table info file pattern within the given directory
+   * whose sequenceId is at most the given max sequenceId.
+   */
+  private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId)
+      throws IOException {
+    FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER);
+    for (FileStatus file : status) {
+      Path path = file.getPath();
+      int sequenceId = getTableInfoSequenceId(path);
+      if (sequenceId <= maxSequenceId) {
+        boolean success = FSUtils.delete(fs, path, false);
+        if (success) {
+          LOG.debug("Deleted table descriptor at " + path);
+        } else {
+          LOG.error("Failed to delete descriptor at " + path);
+        }
+      }
+    }
   }
   /**
-   * @param fs
-   * @param hTableDescriptor
-   * @param tableDir
-   * @param status
+   * Attempts to write a new table descriptor to the given table's directory.
+   * It first writes it to the .tmp dir then uses an atomic rename to move it into place.
+   * It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number
+   * not already in use.
+   * Removes the current descriptor file if passed in.
+   *
    * @return Descriptor file or null if we failed write.
-   * @throws IOException
    */
   private static Path writeTableDescriptor(final FileSystem fs,
-    final HTableDescriptor hTableDescriptor, final Path tableDir,
-    final FileStatus status)
+    final HTableDescriptor htd, final Path tableDir,
+    final FileStatus currentDescriptorFile)
   throws IOException {
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
-    Path tmpTableDir = new Path(tableDir, ".tmp");
+    // This directory is never removed to avoid removing it out from under a concurrent writer.
+    Path tmpTableDir = new Path(tableDir, TMP_DIR);
+    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
     // What is current sequenceid? We read the current sequenceid from
     // the current file. After we read it, another thread could come in and
     // compete with us writing out next version of file. The below retries
     // should help in this case some but its hard to do guarantees in face of
     // concurrent schema edits.
-    int currentSequenceid = status == null? 0: getTableInfoSequenceid(status.getPath());
-    int sequenceid = currentSequenceid;
+    int currentSequenceId = currentDescriptorFile == null ? 0 :
+      getTableInfoSequenceId(currentDescriptorFile.getPath());
+    int newSequenceId = currentSequenceId;
     // Put arbitrary upperbound on how often we retry
     int retries = 10;
-    int retrymax = currentSequenceid + retries;
-    Path tableInfoPath = null;
+    int retrymax = currentSequenceId + retries;
+    Path tableInfoDirPath = null;
     do {
-      sequenceid += 1;
-      Path p = getTableInfoFileName(tmpTableDir, sequenceid);
-      if (fs.exists(p)) {
-        LOG.debug(p + " exists; retrying up to " + retries + " times");
+      newSequenceId += 1;
+      String filename = getTableInfoFileName(newSequenceId);
+      Path tempPath = new Path(tmpTableDir, filename);
+      if (fs.exists(tempPath)) {
+        LOG.debug(tempPath + " exists; retrying up to " + retries + " times");
         continue;
       }
+      tableInfoDirPath = new Path(tableInfoDir, filename);
       try {
-        writeHTD(fs, p, hTableDescriptor);
-        tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
-        if (!fs.rename(p, tableInfoPath)) {
-          throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
+        writeHTD(fs, tempPath, htd);
+        fs.mkdirs(tableInfoDirPath.getParent());
+        if (!fs.rename(tempPath, tableInfoDirPath)) {
+          throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
         }
+        LOG.debug("Wrote descriptor into: " + tableInfoDirPath);
       } catch (IOException ioe) {
         // Presume clash of names or something; go around again.
         LOG.debug("Failed write and/or rename; retrying", ioe);
-        if (!FSUtils.deleteDirectory(fs, p)) {
-          LOG.warn("Failed cleanup of " + p);
+        if (!FSUtils.deleteDirectory(fs, tempPath)) {
+          LOG.warn("Failed cleanup of " + tempPath);
         }
-        tableInfoPath = null;
+        tableInfoDirPath = null;
         continue;
       }
-      // Cleanup old schema file.
-      if (status != null) {
-        if (!FSUtils.deleteDirectory(fs, status.getPath())) {
-          LOG.warn("Failed delete of " + status.getPath() + "; continuing");
-        }
-      }
       break;
-    } while (sequenceid < retrymax);
-    return tableInfoPath;
+    } while (newSequenceId < retrymax);
+    if (tableInfoDirPath != null) {
+      // if we succeeded, remove old table info files.
+      deleteTableDescriptorFiles(fs, tableInfoDir, newSequenceId - 1);
+    }
+    return tableInfoDirPath;
   }
   private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
@@ -552,92 +665,59 @@ public class FSTableDescriptors implements TableDescriptors {
     }
   }
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param htableDescriptor
-   * @param conf
-   */
-  public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
-      Configuration conf)
-  throws IOException {
-    return createTableDescriptor(htableDescriptor, conf, false);
-  }
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param htableDescriptor
-   * @param conf
-   * @param forceCreation True if we are to overwrite existing file.
-   */
-  static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
-      final Configuration conf, boolean forceCreation)
-  throws IOException {
-    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, forceCreation);
-  }
   /**
    * Create new HTableDescriptor in HDFS. Happens when we are creating table.
    * Used by tests.
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor)
-  throws IOException {
-    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
-  }
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   * @param forceCreation
    * @return True if we successfully created file.
    */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor, boolean forceCreation)
+  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+    return createTableDescriptor(htd, false);
+  }
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+   * forceCreation is true then even if previous table descriptor is present it
+   * will be overwritten
+   *
+   * @return True if we successfully created file.
+   */
+  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tabledir = FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString());
-    return createTableDescriptorForTableDirectory(fs, tabledir, htableDescriptor, forceCreation);
+    Path tableDir = getTableDirectory(htd.getNameAsString());
+    return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
   /**
    * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
    * a new table or snapshot a table.
-   * @param fs filesystem where the descriptor should be written
-   * @param tabledir directory under which we should write the file
-   * @param htableDescriptor description of the table to write
+   * @param tableDir table directory under which we should write the file
+   * @param htd description of the table to write
    * @param forceCreation if <tt>true</tt>,then even if previous table descriptor is present it will
    *          be overwritten
    * @return <tt>true</tt> if the we successfully created the file, <tt>false</tt> if the file
    *          already exists and we weren't forcing the descriptor creation.
    * @throws IOException if a filesystem error occurs
    */
-  public static boolean createTableDescriptorForTableDirectory(FileSystem fs, Path tabledir,
-      HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException {
-    FileStatus status = getTableInfoPath(fs, tabledir);
+  public boolean createTableDescriptorForTableDirectory(Path tableDir,
+      HTableDescriptor htd, boolean forceCreation) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
+    }
+    FileStatus status = getTableInfoPath(fs, tableDir);
     if (status != null) {
-      LOG.info("Current tableInfoPath = " + status.getPath());
+      LOG.debug("Current tableInfoPath = " + status.getPath());
       if (!forceCreation) {
         if (fs.exists(status.getPath()) && status.getLen() > 0) {
-          if (getTableDescriptor(fs, status.getPath().getParent()).equals(htableDescriptor)) {
-            LOG.info("TableInfo already exists.. Skipping creation");
+          if (readTableDescriptor(fs, status, false).equals(htd)) {
+            LOG.debug("TableInfo already exists.. Skipping creation");
             return false;
           }
         }
       }
     }
-    Path p = writeTableDescriptor(fs, htableDescriptor, tabledir, status);
+    Path p = writeTableDescriptor(fs, htd, tableDir, status);
     return p != null;
   }
 }
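A sketch (not part of the patch) of how the reworked, instance-oriented FSTableDescriptors API reads once this commit is applied; the table name "t1" and the variable names are made-up examples, and conf/fs/rootdir are assumed to describe a live HBase layout:

  FSTableDescriptors fstd = new FSTableDescriptors(conf);   // read-write instance
  HTableDescriptor htd = new HTableDescriptor("t1");
  fstd.createTableDescriptor(htd);       // writes t1/.tabledesc/.tableinfo.0000000001
  HTableDescriptor cached = fstd.get("t1");  // cached read; mod time still checked against the NN

  // Read-only instances now refuse mutations up front:
  FSTableDescriptors readOnly = new FSTableDescriptors(fs, rootdir, true);
  readOnly.add(htd);                     // throws NotImplementedException

  // One-shot static read that bypasses the cache entirely:
  HTableDescriptor fromFs = FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, "t1");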

View File

@@ -875,7 +875,7 @@ public abstract class FSUtils {
   }
   /**
-   * Checks if root region exists
+   * Checks if meta region exists
    *
    * @param fs file system
    * @param rootdir root directory of HBase installation
@@ -885,9 +885,9 @@
   @SuppressWarnings("deprecation")
   public static boolean metaRegionExists(FileSystem fs, Path rootdir)
   throws IOException {
-    Path rootRegionDir =
+    Path metaRegionDir =
       HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
-    return fs.exists(rootRegionDir);
+    return fs.exists(metaRegionDir);
   }
   /**

View File

@ -804,7 +804,7 @@ public class HBaseFsck extends Configured implements Tool {
tablesInfo.put(tableName, modTInfo); tablesInfo.put(tableName, modTInfo);
try { try {
HTableDescriptor htd = HTableDescriptor htd =
FSTableDescriptors.getTableDescriptor(hbaseRoot.getFileSystem(getConf()), FSTableDescriptors.getTableDescriptorFromFs(hbaseRoot.getFileSystem(getConf()),
hbaseRoot, tableName); hbaseRoot, tableName);
modTInfo.htds.add(htd); modTInfo.htds.add(htd);
} catch (IOException ioe) { } catch (IOException ioe) {
@ -849,16 +849,16 @@ public class HBaseFsck extends Configured implements Tool {
* 1. the correct tablename <br> * 1. the correct tablename <br>
* 2. the correct colfamily list<br> * 2. the correct colfamily list<br>
* 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br> * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @param tableName
* @throws IOException * @throws IOException
*/ */
private boolean fabricateTableInfo(String tableName, Set<String> columns) throws IOException { private boolean fabricateTableInfo(FSTableDescriptors fstd, String tableName,
Set<String> columns) throws IOException {
if (columns ==null || columns.isEmpty()) return false; if (columns ==null || columns.isEmpty()) return false;
HTableDescriptor htd = new HTableDescriptor(tableName); HTableDescriptor htd = new HTableDescriptor(tableName);
for (String columnfamimly : columns) { for (String columnfamimly : columns) {
htd.addFamily(new HColumnDescriptor(columnfamimly)); htd.addFamily(new HColumnDescriptor(columnfamimly));
} }
FSTableDescriptors.createTableDescriptor(htd, getConf(), true); fstd.createTableDescriptor(htd, true);
return true; return true;
} }
@ -889,13 +889,13 @@ public class HBaseFsck extends Configured implements Tool {
public void fixOrphanTables() throws IOException { public void fixOrphanTables() throws IOException {
if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) { if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
Path hbaseRoot = FSUtils.getRootDir(getConf());
List<String> tmpList = new ArrayList<String>(); List<String> tmpList = new ArrayList<String>();
tmpList.addAll(orphanTableDirs.keySet()); tmpList.addAll(orphanTableDirs.keySet());
HTableDescriptor[] htds = getHTableDescriptors(tmpList); HTableDescriptor[] htds = getHTableDescriptors(tmpList);
Iterator<Entry<String, Set<String>>> iter = orphanTableDirs.entrySet().iterator(); Iterator<Entry<String, Set<String>>> iter = orphanTableDirs.entrySet().iterator();
int j = 0; int j = 0;
int numFailedCase = 0; int numFailedCase = 0;
FSTableDescriptors fstd = new FSTableDescriptors(getConf());
while (iter.hasNext()) { while (iter.hasNext()) {
Entry<String, Set<String>> entry = (Entry<String, Set<String>>) iter.next(); Entry<String, Set<String>> entry = (Entry<String, Set<String>>) iter.next();
String tableName = entry.getKey(); String tableName = entry.getKey();
@ -904,13 +904,12 @@ public class HBaseFsck extends Configured implements Tool {
if (tableName.equals(Bytes.toString(htds[j].getName()))) { if (tableName.equals(Bytes.toString(htds[j].getName()))) {
HTableDescriptor htd = htds[j]; HTableDescriptor htd = htds[j];
LOG.info("fixing orphan table: " + tableName + " from cache"); LOG.info("fixing orphan table: " + tableName + " from cache");
FSTableDescriptors.createTableDescriptor( fstd.createTableDescriptor(htd, true);
hbaseRoot.getFileSystem(getConf()), hbaseRoot, htd, true);
j++; j++;
iter.remove(); iter.remove();
} }
} else { } else {
if (fabricateTableInfo(tableName, entry.getValue())) { if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file"); LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName); LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
iter.remove(); iter.remove();

View File

@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.MetaEditor;
@ -43,12 +42,9 @@ import org.apache.hadoop.hbase.client.HConnectable;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@ -153,7 +149,7 @@ class HMerge {
this.rootDir = FSUtils.getRootDir(conf); this.rootDir = FSUtils.getRootDir(conf);
Path tabledir = HTableDescriptor.getTableDir(this.rootDir, tableName); Path tabledir = HTableDescriptor.getTableDir(this.rootDir, tableName);
this.htd = FSTableDescriptors.getTableDescriptor(this.fs, tabledir); this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME; String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf); this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
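
The read side follows the same pattern throughout this commit; a minimal sketch using the same two calls as the hunk above ("myTable" is a placeholder):

    // Resolve the table directory, then read its current .tableinfo straight from HDFS.
    Path tableDir = HTableDescriptor.getTableDir(rootDir, Bytes.toBytes("myTable"));
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);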

View File

@ -61,7 +61,6 @@ public class Merge extends Configured implements Tool {
private byte [] tableName; // Name of table private byte [] tableName; // Name of table
private volatile byte [] region1; // Name of region 1 private volatile byte [] region1; // Name of region 1
private volatile byte [] region2; // Name of region 2 private volatile byte [] region2; // Name of region 2
private volatile boolean isMetaTable;
private volatile HRegionInfo mergeInfo; private volatile HRegionInfo mergeInfo;
/** default constructor */ /** default constructor */
@ -153,8 +152,8 @@ public class Merge extends Configured implements Tool {
if (info2 == null) { if (info2 == null) {
throw new NullPointerException("info2 is null using key " + meta); throw new NullPointerException("info2 is null using key " + meta);
} }
HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()), HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
this.rootdir, this.tableName); this.rootdir, Bytes.toString(this.tableName));
HRegion merged = merge(htd, meta, info1, info2); HRegion merged = merge(htd, meta, info1, info2);
LOG.info("Adding " + merged.getRegionInfo() + " to " + LOG.info("Adding " + merged.getRegionInfo() + " to " +
@ -246,7 +245,6 @@ public class Merge extends Configured implements Tool {
return -1; return -1;
} }
tableName = Bytes.toBytes(remainingArgs[0]); tableName = Bytes.toBytes(remainingArgs[0]);
isMetaTable = Bytes.compareTo(tableName, HConstants.META_TABLE_NAME) == 0;
region1 = Bytes.toBytesBinary(remainingArgs[1]); region1 = Bytes.toBytesBinary(remainingArgs[1]);
region2 = Bytes.toBytesBinary(remainingArgs[2]); region2 = Bytes.toBytesBinary(remainingArgs[2]);

View File

@ -39,10 +39,10 @@ public class TestFSTableDescriptorForceCreation {
final String name = "newTable2"; final String name = "newTable2";
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name); HTableDescriptor htd = new HTableDescriptor(name);
assertTrue("Should create new table descriptor", assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
} }
@Test @Test
@ -50,13 +50,12 @@ public class TestFSTableDescriptorForceCreation {
throws IOException { throws IOException {
final String name = "testAlreadyExists"; final String name = "testAlreadyExists";
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any detrius laying around. // Cleanup old tests if any detritus laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(fs, rootdir); FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name); HTableDescriptor htd = new HTableDescriptor(name);
htds.add(htd); fstd.add(htd);
assertFalse("Should not create new table descriptor", assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
} }
@Test @Test
@ -65,10 +64,11 @@ public class TestFSTableDescriptorForceCreation {
final String name = "createNewTableNew2"; final String name = "createNewTableNew2";
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name); HTableDescriptor htd = new HTableDescriptor(name);
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false); fstd.createTableDescriptor(htd, false);
assertTrue("Should create new table descriptor", assertTrue("Should create new table descriptor",
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true)); fstd.createTableDescriptor(htd, true));
} }
} }
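
Taken together, the three tests above pin down the forceCreation contract; a condensed sketch:

    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
    fstd.createTableDescriptor(htd, false); // true: nothing on disk yet
    fstd.createTableDescriptor(htd, false); // false: identical descriptor already present
    fstd.createTableDescriptor(htd, true);  // true: rewrite forced despite the existing file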

View File

@ -193,8 +193,9 @@ public class TestMasterFailover {
FileSystem filesystem = FileSystem.get(conf); FileSystem filesystem = FileSystem.get(conf);
Path rootdir = FSUtils.getRootDir(conf); Path rootdir = FSUtils.getRootDir(conf);
FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
// Write the .tableinfo // Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled); fstd.createTableDescriptor(htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null); HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
createRegion(hriEnabled, rootdir, conf, htdEnabled); createRegion(hriEnabled, rootdir, conf, htdEnabled);
@ -206,7 +207,7 @@ public class TestMasterFailover {
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo // Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled); fstd.createTableDescriptor(htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
createRegion(hriDisabled, rootdir, conf, htdDisabled); createRegion(hriDisabled, rootdir, conf, htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta( List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
@ -495,8 +496,9 @@ public class TestMasterFailover {
htdEnabled.addFamily(new HColumnDescriptor(FAMILY)); htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
FileSystem filesystem = FileSystem.get(conf); FileSystem filesystem = FileSystem.get(conf);
Path rootdir = FSUtils.getRootDir(conf); Path rootdir = FSUtils.getRootDir(conf);
FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
// Write the .tableinfo // Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled); fstd.createTableDescriptor(htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
null, null); null, null);
createRegion(hriEnabled, rootdir, conf, htdEnabled); createRegion(hriEnabled, rootdir, conf, htdEnabled);
@ -508,7 +510,7 @@ public class TestMasterFailover {
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo // Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled); fstd.createTableDescriptor(htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
createRegion(hriDisabled, rootdir, conf, htdDisabled); createRegion(hriDisabled, rootdir, conf, htdDisabled);

View File

@ -144,7 +144,7 @@ public class TestTableDescriptorModification {
// Verify descriptor from HDFS // Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName); Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName);
htd = FSTableDescriptors.getTableDescriptor(mfs.getFileSystem(), tableDir); htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(htd, tableName, families); verifyTableDescriptor(htd, tableName, families);
} }

View File

@ -193,9 +193,9 @@ public class SnapshotTestingUtils {
HConstants.HREGION_LOGDIR_NAME)); HConstants.HREGION_LOGDIR_NAME));
} }
// check the table info // check the table info
HTableDescriptor desc = FSTableDescriptors.getTableDescriptor(fs, rootDir, HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
tableName); Bytes.toString(tableName));
HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptor(fs, HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs,
snapshotDir); snapshotDir);
assertEquals(desc, snapshotDesc); assertEquals(desc, snapshotDesc);
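
Both getTableDescriptorFromFs overloads used above, side by side (a sketch; fs, rootDir, and snapshotDir as in the test, "myTable" a placeholder):

    // By root dir plus table name: presumably resolves <rootDir>/<tableName> first.
    HTableDescriptor live = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, "myTable");
    // By directory: works for table dirs and snapshot dirs alike.
    HTableDescriptor snap = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);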

View File

@ -359,9 +359,10 @@ public class TestFlushSnapshotFromClient {
assertTrue(fs.exists(snapshotinfo)); assertTrue(fs.exists(snapshotinfo));
// check the table info // check the table info
HTableDescriptor desc = FSTableDescriptors.getTableDescriptor(fs, rootDir, TABLE_NAME); HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs,
HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptor(fs, rootDir, STRING_TABLE_NAME);
SnapshotDescriptionUtils.getSnapshotsDir(rootDir), Bytes.toBytes(snapshotName)); HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs,
SnapshotDescriptionUtils.getSnapshotsDir(rootDir), snapshotName);
assertEquals(desc, snapshotDesc); assertEquals(desc, snapshotDesc);
// check the region snapshot for all the regions // check the region snapshot for all the regions

View File

@ -126,7 +126,7 @@ public class TestRestoreSnapshotHelper {
LOG.debug("pre-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir); LOG.debug("pre-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG); FSUtils.logFileSystemState(fs, rootDir, LOG);
FSTableDescriptors.createTableDescriptor(htdClone, conf); new FSTableDescriptors(conf).createTableDescriptor(htdClone);
RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone); RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone);
helper.restoreHdfsRegions(); helper.restoreHdfsRegions();

View File

@ -27,6 +27,7 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Comparator;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -55,12 +56,12 @@ public class TestFSTableDescriptors {
@Test (expected=IllegalArgumentException.class) @Test (expected=IllegalArgumentException.class)
public void testRegexAgainstOldStyleTableInfo() { public void testRegexAgainstOldStyleTableInfo() {
Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME); Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX);
int i = FSTableDescriptors.getTableInfoSequenceid(p); int i = FSTableDescriptors.getTableInfoSequenceId(p);
assertEquals(0, i); assertEquals(0, i);
// Assert it won't eat garbage -- that it fails // Assert it won't eat garbage -- that it fails
p = new Path("/tmp", "abc"); p = new Path("/tmp", "abc");
FSTableDescriptors.getTableInfoSequenceid(p); FSTableDescriptors.getTableInfoSequenceId(p);
} }
@Test @Test
@ -68,12 +69,13 @@ public class TestFSTableDescriptors {
Path testdir = UTIL.getDataTestDir("testCreateAndUpdate"); Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
HTableDescriptor htd = new HTableDescriptor("testCreate"); HTableDescriptor htd = new HTableDescriptor("testCreate");
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
FileStatus [] statuses = fs.listStatus(testdir); FileStatus [] statuses = fs.listStatus(testdir);
assertTrue("statuses.length="+statuses.length, statuses.length == 1); assertTrue("statuses.length="+statuses.length, statuses.length == 1);
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); fstd.updateTableDescriptor(htd);
} }
statuses = fs.listStatus(testdir); statuses = fs.listStatus(testdir);
assertTrue(statuses.length == 1); assertTrue(statuses.length == 1);
@ -83,53 +85,53 @@ public class TestFSTableDescriptors {
} }
@Test @Test
public void testSequenceidAdvancesOnTableInfo() throws IOException { public void testSequenceIdAdvancesOnTableInfo() throws IOException {
Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
int i0 = FSTableDescriptors.getTableInfoSequenceid(p0); Path p0 = fstd.updateTableDescriptor(htd);
Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
Path p1 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file. // Assert we cleaned up the old file.
assertTrue(!fs.exists(p0)); assertTrue(!fs.exists(p0));
int i1 = FSTableDescriptors.getTableInfoSequenceid(p1); int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
assertTrue(i1 == i0 + 1); assertTrue(i1 == i0 + 1);
Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); Path p2 = fstd.updateTableDescriptor(htd);
// Assert we cleaned up the old file. // Assert we cleaned up the old file.
assertTrue(!fs.exists(p1)); assertTrue(!fs.exists(p1));
int i2 = FSTableDescriptors.getTableInfoSequenceid(p2); int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
assertTrue(i2 == i1 + 1); assertTrue(i2 == i1 + 1);
} }
@Test @Test
public void testFormatTableInfoSequenceId() { public void testFormatTableInfoSequenceId() {
Path p0 = assertWriteAndReadSequenceid(0); Path p0 = assertWriteAndReadSequenceId(0);
// Assert p0 has the format we expect. // Assert p0 has the format we expect.
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();
for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) { for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
sb.append("0"); sb.append("0");
} }
assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(), assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(),
p0.getName()); p0.getName());
// Check a few more. // Check a few more.
Path p2 = assertWriteAndReadSequenceid(2); Path p2 = assertWriteAndReadSequenceId(2);
Path p10000 = assertWriteAndReadSequenceid(10000); Path p10000 = assertWriteAndReadSequenceId(10000);
// Get a .tableinfo that has no sequence id suffix. // Get a .tableinfo that has no sequence id suffix.
Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME); Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX);
FileStatus fs = new FileStatus(0, false, 0, 0, 0, p); FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0); FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2); FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000); FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
FSTableDescriptors.FileStatusFileNameComparator comparator = Comparator<FileStatus> comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
new FSTableDescriptors.FileStatusFileNameComparator();
assertTrue(comparator.compare(fs, fs0) > 0); assertTrue(comparator.compare(fs, fs0) > 0);
assertTrue(comparator.compare(fs0, fs2) > 0); assertTrue(comparator.compare(fs0, fs2) > 0);
assertTrue(comparator.compare(fs2, fs10000) > 0); assertTrue(comparator.compare(fs2, fs10000) > 0);
} }
private Path assertWriteAndReadSequenceid(final int i) { private Path assertWriteAndReadSequenceId(final int i) {
Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i); Path p = new Path("/tmp", FSTableDescriptors.getTableInfoFileName(i));
int ii = FSTableDescriptors.getTableInfoSequenceid(p); int ii = FSTableDescriptors.getTableInfoSequenceId(p);
assertEquals(i, ii); assertEquals(i, ii);
return p; return p;
} }
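
A hedged note on the file-name scheme these helpers round-trip; the concrete prefix value and width below are assumptions inferred from this diff, not verified constants:

    // getTableInfoFileName(2) should yield TABLEINFO_FILE_PREFIX plus a
    // zero-padded sequence id, e.g. ".tableinfo.0000000002" if the prefix is
    // ".tableinfo" and WIDTH_OF_SEQUENCE_ID is 10.
    String name = FSTableDescriptors.getTableInfoFileName(2);
    int seqId = FSTableDescriptors.getTableInfoSequenceId(new Path("/tmp", name)); // == 2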
@ -152,30 +154,19 @@ public class TestFSTableDescriptors {
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(name); HTableDescriptor htd = new HTableDescriptor(name);
Path rootdir = UTIL.getDataTestDir(name); Path rootdir = UTIL.getDataTestDir(name);
createHTDInFS(fs, rootdir, htd); FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
fstd.createTableDescriptor(htd);
HTableDescriptor htd2 = HTableDescriptor htd2 =
FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString()); FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getNameAsString());
assertTrue(htd.equals(htd2)); assertTrue(htd.equals(htd2));
} }
private void createHTDInFS(final FileSystem fs, Path rootdir,
final HTableDescriptor htd)
throws IOException {
FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
}
@Test public void testHTableDescriptors() @Test public void testHTableDescriptors()
throws IOException, InterruptedException { throws IOException, InterruptedException {
final String name = "testHTableDescriptors"; final String name = "testHTableDescriptors";
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Clean up old tests if any debris is lying around. // Clean up old tests if any debris is lying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
createHTDInFS(fs, rootdir, htd);
}
FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) { FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
@Override @Override
public HTableDescriptor get(byte[] tablename) public HTableDescriptor get(byte[] tablename)
@ -184,6 +175,13 @@ public class TestFSTableDescriptors {
return super.get(tablename); return super.get(tablename);
} }
}; };
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htds.createTableDescriptor(htd);
}
for (int i = 0; i < count; i++) { for (int i = 0; i < count; i++) {
assertTrue(htds.get(Bytes.toBytes(name + i)) != null); assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
} }
@ -194,7 +192,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) { for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i); HTableDescriptor htd = new HTableDescriptor(name + i);
htd.addFamily(new HColumnDescriptor("" + i)); htd.addFamily(new HColumnDescriptor("" + i));
FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd); htds.updateTableDescriptor(htd);
} }
// Wait a while so mod time we write is for sure different. // Wait a while so mod time we write is for sure different.
Thread.sleep(100); Thread.sleep(100);
@ -240,7 +238,8 @@ public class TestFSTableDescriptors {
@Test @Test
public void testTableInfoFileStatusComparator() { public void testTableInfoFileStatusComparator() {
FileStatus bare = FileStatus bare =
new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME)); new FileStatus(0, false, 0, 0, -1,
new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX));
FileStatus future = FileStatus future =
new FileStatus(0, false, 0, 0, -1, new FileStatus(0, false, 0, 0, -1,
new Path("/tmp/tablinfo." + System.currentTimeMillis())); new Path("/tmp/tablinfo." + System.currentTimeMillis()));
@ -250,8 +249,7 @@ public class TestFSTableDescriptors {
FileStatus [] alist = {bare, future, farFuture}; FileStatus [] alist = {bare, future, farFuture};
FileStatus [] blist = {bare, farFuture, future}; FileStatus [] blist = {bare, farFuture, future};
FileStatus [] clist = {farFuture, bare, future}; FileStatus [] clist = {farFuture, bare, future};
FSTableDescriptors.FileStatusFileNameComparator c = Comparator<FileStatus> c = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
new FSTableDescriptors.FileStatusFileNameComparator();
Arrays.sort(alist, c); Arrays.sort(alist, c);
Arrays.sort(blist, c); Arrays.sort(blist, c);
Arrays.sort(clist, c); Arrays.sort(clist, c);
@ -282,16 +280,17 @@ public class TestFSTableDescriptors {
HTableDescriptor htd = new HTableDescriptor( HTableDescriptor htd = new HTableDescriptor(
"testCreateTableDescriptorUpdatesIfThereExistsAlready"); "testCreateTableDescriptorUpdatesIfThereExistsAlready");
FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FileSystem fs = FileSystem.get(UTIL.getConfiguration());
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue")); htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); // this will re-create assertTrue(fstd.createTableDescriptor(htd)); // this will re-create
Path tableDir = FSUtils.getTablePath(testdir, htd.getName()); Path tableDir = fstd.getTableDirectory(htd.getNameAsString());
Path tmpTableDir = new Path(tableDir, ".tmp"); Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
FileStatus[] statuses = fs.listStatus(tmpTableDir); FileStatus[] statuses = fs.listStatus(tmpTableDir);
assertTrue(statuses.length == 0); assertTrue(statuses.length == 0);
assertEquals(htd, FSTableDescriptors.getTableDescriptor(fs, tableDir)); assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
} }
} }
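
The exposed TABLEINFO_FILESTATUS_COMPARATOR replaces the old FileStatusFileNameComparator inner class; per the ordering asserted in testTableInfoFileStatusComparator, a sketch of the intended use:

    // Sort candidate .tableinfo files so the highest sequence id comes first;
    // a bare, suffix-less file sorts last.
    FileStatus[] candidates = fs.listStatus(tableDir);
    Arrays.sort(candidates, FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR);
    FileStatus mostRecent = candidates[0];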

View File

@ -1099,8 +1099,6 @@ public class TestHBaseFsck {
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
assertTrue(cluster.waitForActiveAndReadyMaster()); assertTrue(cluster.waitForActiveAndReadyMaster());
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = FSUtils.getRootDir(conf);
byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"), byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") }; Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
@ -1108,8 +1106,8 @@ public class TestHBaseFsck {
htdDisabled.addFamily(new HColumnDescriptor(FAM)); htdDisabled.addFamily(new HColumnDescriptor(FAM));
// Write the .tableinfo // Write the .tableinfo
FSTableDescriptors FSTableDescriptors fstd = new FSTableDescriptors(conf);
.createTableDescriptor(filesystem, rootdir, htdDisabled); fstd.createTableDescriptor(htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta( List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS); TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);

View File

@ -95,7 +95,7 @@ public class TestMergeTable {
// Create regions and populate them at same time. Create the tabledir // Create regions and populate them at same time. Create the tabledir
// for them first. // for them first.
FSTableDescriptors.createTableDescriptor(fs, rootdir, desc); new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
HRegion [] regions = { HRegion [] regions = {
createRegion(desc, null, row_70001, 1, 70000, rootdir), createRegion(desc, null, row_70001, 1, 70000, rootdir),
createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir), createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),

View File

@ -139,7 +139,7 @@ public class TestMergeTool extends HBaseTestCase {
try { try {
// Create meta region // Create meta region
createMetaRegion(); createMetaRegion();
FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc); new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(this.desc);
/* /*
* Create the regions we will merge * Create the regions we will merge
*/ */