HBASE-12016 Reduce number of versions in Meta table. Make it configurable (Andrey Stepachev)
parent 4057f6c4e4
commit a8be606145
@@ -1261,7 +1261,11 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
         new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
   }
 
-  /** Table descriptor for <code>hbase:meta</code> catalog table */
+  /** Table descriptor for <code>hbase:meta</code> catalog table
+   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
+   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
+   */
+  @Deprecated
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       TableName.META_TABLE_NAME,
       new HColumnDescriptor[] {
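The deprecation note above names the two supported replacements for the static constant. Below is a minimal, hedged sketch of the client-side route; the class name is illustrative and the admin handle uses the era-appropriate HBaseAdmin constructor, none of which is part of this commit:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class MetaDescriptorLookup {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          // Ask the master for hbase:meta's descriptor instead of reading META_TABLEDESC.
          HTableDescriptor meta = admin.getTableDescriptor(TableName.META_TABLE_NAME);
          System.out.println(meta);
        } finally {
          admin.close();
        }
      }
    }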
@@ -2473,9 +2473,6 @@ class ConnectionManager {
     public HTableDescriptor getHTableDescriptor(final TableName tableName)
     throws IOException {
       if (tableName == null) return null;
-      if (tableName.equals(TableName.META_TABLE_NAME)) {
-        return HTableDescriptor.META_TABLEDESC;
-      }
       MasterKeepAliveConnection master = getKeepAliveMasterService();
       GetTableDescriptorsResponse htds;
       try {
@@ -37,7 +37,11 @@ public class TestHColumnDescriptor {
   @Test
   public void testPb() throws DeserializationException {
     HColumnDescriptor hcd = new HColumnDescriptor(
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()[0]);
+        new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+            .setInMemory(true)
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            .setBloomFilterType(BloomType.NONE)
+            .setCacheDataInL1(true));
     final int v = 123;
     hcd.setBlocksize(v);
     hcd.setTimeToLive(v);
@@ -45,7 +45,7 @@ public class TestHTableDescriptor {
 
   @Test
   public void testPb() throws DeserializationException, IOException {
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     final int v = 123;
     htd.setMaxFileSize(v);
     htd.setDurability(Durability.ASYNC_WAL);
@@ -693,6 +693,26 @@ public final class HConstants {
    */
   public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
 
+  /**
+   * Parameter name for the number of versions kept by the meta table.
+   */
+  public static String HBASE_META_VERSIONS = "hbase.meta.versions";
+
+  /**
+   * Default value of {@link #HBASE_META_VERSIONS}.
+   */
+  public static int DEFAULT_HBASE_META_VERSIONS = 3;
+
+  /**
+   * Parameter name for the meta table block size.
+   */
+  public static String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
+
+  /**
+   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+   */
+  public static int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+
   /**
    * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
    * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
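The four constants above turn the previously hard-coded hbase:meta schema values into configuration knobs. A minimal sketch of overriding both programmatically; the class name and values are illustrative, not from the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class MetaTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keep ten versions per cell in hbase:meta instead of the default three.
        conf.setInt(HConstants.HBASE_META_VERSIONS, 10);
        // Use 16 KB blocks for hbase:meta instead of the default 8 KB.
        conf.setInt(HConstants.HBASE_META_BLOCK_SIZE, 16 * 1024);
      }
    }

The same keys, hbase.meta.versions and hbase.meta.blocksize, can equally be set in hbase-site.xml.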
@@ -22,10 +22,12 @@ import java.io.IOException;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 
 /**
  * Class represents table state on HDFS.
@@ -151,4 +153,30 @@ public class TableDescriptor {
         ", tableState=" + tableState +
         '}';
   }
+
+  public static HTableDescriptor metaTableDescriptor(final Configuration conf)
+      throws IOException {
+    HTableDescriptor metaDescriptor = new HTableDescriptor(
+        TableName.META_TABLE_NAME,
+        new HColumnDescriptor[] {
+            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+                .setInMemory(true)
+                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+                // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+                .setBloomFilterType(BloomType.NONE)
+                // Enable cache of data blocks in L1 if more than one caching tier deployed:
+                // e.g. if using CombinedBlockCache (BucketCache).
+                .setCacheDataInL1(true)
+        }) {
+    };
+    metaDescriptor.addCoprocessor(
+        "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+        null, Coprocessor.PRIORITY_SYSTEM, null);
+    return metaDescriptor;
+  }
 }
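With the factory above, the hbase:meta schema is derived from configuration rather than a shared static. A short sketch of exercising it directly, assuming only the classes in this commit (the printed value follows from the override; the class name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;

    public class MetaDescriptorDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt(HConstants.HBASE_META_VERSIONS, 5);
        HTableDescriptor meta = TableDescriptor.metaTableDescriptor(conf);
        // hbase:meta has a single 'info' family; its max versions now follow the config.
        System.out.println(meta.getColumnFamilies()[0].getMaxVersions()); // 5
      }
    }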
@@ -457,8 +457,11 @@ public class MasterFileSystem {
 
     // Create tableinfo-s for hbase:meta if not already there.
-    // assume, created table descriptor is for enabling table
-    new FSTableDescriptors(fs, rd).createTableDescriptor(
-        new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLING));
+    // The meta table is a system table, so its descriptor is predefined;
+    // we should get it from the registry.
+    FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
+    fsd.createTableDescriptor(
+        new TableDescriptor(fsd.get(TableName.META_TABLE_NAME), TableState.State.ENABLING));
 
     return rd;
   }
@@ -498,10 +501,10 @@
       // not make it in first place.  Turn off block caching for bootstrap.
       // Enable after.
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-      setInfoFamilyCachingForMeta(false);
-      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
-          HTableDescriptor.META_TABLEDESC, null, true, true);
-      setInfoFamilyCachingForMeta(true);
+      HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+      setInfoFamilyCachingForMeta(metaDescriptor, false);
+      HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor);
+      setInfoFamilyCachingForMeta(metaDescriptor, true);
       HRegion.closeHRegion(meta);
     } catch (IOException e) {
       e = e instanceof RemoteException ?
@@ -514,9 +517,8 @@
   /**
    * Enable in memory caching for hbase:meta
    */
-  public static void setInfoFamilyCachingForMeta(final boolean b) {
-    for (HColumnDescriptor hcd:
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
+  public static void setInfoFamilyCachingForMeta(HTableDescriptor metaDescriptor, final boolean b) {
+    for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
       if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
         hcd.setBlockCacheEnabled(b);
         hcd.setInMemory(b);
@@ -386,8 +386,9 @@ public class NamespaceUpgrade implements Tool {
     HLog metaHLog = HLogFactory.createMetaHLog(fs, rootDir,
         metaLogName, conf, null,
         fakeServer.toString());
+    FSTableDescriptors fst = new FSTableDescriptors(conf);
     HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO,
-        HTableDescriptor.META_TABLEDESC, metaHLog, conf);
+        fst.get(TableName.META_TABLE_NAME), metaHLog, conf);
     HRegion region = null;
     try {
       for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) {
@@ -140,6 +140,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HashedBytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -5901,10 +5902,12 @@ public class HRegion implements HeapSize { // , Writable{
       final boolean majorCompact)
   throws IOException {
     HRegion region;
+    FSTableDescriptors fst = new FSTableDescriptors(c);
     // Currently expects tables have one region only.
     if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) {
       region = HRegion.newHRegion(p, log, fs, c,
-        HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null);
+        HRegionInfo.FIRST_META_REGIONINFO,
+        fst.get(TableName.META_TABLE_NAME), null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());
     }
@@ -493,7 +493,7 @@ public class HRegionServer extends HasThread implements
     boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
     this.fs = new HFileSystem(this.conf, useHBaseChecksum);
     this.rootDir = FSUtils.getRootDir(this.conf);
-    this.tableDescriptors = new FSTableDescriptors(
+    this.tableDescriptors = new FSTableDescriptors(this.conf,
       this.fs, this.rootDir, !canUpdateTableDescriptor());
 
     service = new ExecutorService(getServerName().toShortString());
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.DrainBarrier;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.Threads;
@@ -2052,9 +2053,11 @@ class FSHLog implements HLog, Syncable {
     WALEdit walEdit = new WALEdit();
     walEdit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"),
       Bytes.toBytes("qualifier"), -1, new byte [1000]));
+    FSTableDescriptors fst = new FSTableDescriptors(conf);
     for (AtomicLong i = new AtomicLong(0); i.get() < count; i.incrementAndGet()) {
-      wal.append(HRegionInfo.FIRST_META_REGIONINFO, TableName.META_TABLE_NAME, walEdit, start,
-        HTableDescriptor.META_TABLEDESC, i);
+      wal.append(HRegionInfo.FIRST_META_REGIONINFO,
+          TableName.META_TABLE_NAME, walEdit, start,
+          fst.get(TableName.META_TABLE_NAME), i);
       wal.sync();
     }
     wal.close();
@@ -2173,15 +2173,9 @@ public class AccessController extends BaseMasterAndRegionObserver
     else {
       MasterServices masterServices = ctx.getEnvironment().getMasterServices();
       for (TableName tableName: tableNamesList) {
-        // Do not deny if the table does not exist
-        try {
-          masterServices.checkTableModifiable(tableName);
-        } catch (TableNotFoundException ex) {
-          // Skip checks for a table that does not exist
-          continue;
-        } catch (TableNotDisabledException ex) {
-          // We don't care about this
-        }
+        // Skip checks for a table that does not exist
+        if (!masterServices.getTableStateManager().isTablePresent(tableName))
+          continue;
         requirePermission("getTableDescriptors", tableName, null, null,
             Action.ADMIN, Action.CREATE);
       }
@@ -355,7 +355,7 @@ public class SnapshotManifest {
       Path rootDir = FSUtils.getRootDir(conf);
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(fs, rootDir)
+      new FSTableDescriptors(conf, fs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
             htd, TableState.State.ENABLED), false);
     } else {
@@ -28,6 +28,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.primitives.Ints;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,19 +41,18 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableInfoMissingException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.primitives.Ints;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -91,6 +92,11 @@ public class FSTableDescriptors implements TableDescriptors {
   private final Map<TableName, TableDescriptorAndModtime> cache =
       new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
 
+  /**
+   * Table descriptor for the <code>hbase:meta</code> catalog table.
+   */
+  private final HTableDescriptor metaTableDescriptor;
+
   /**
    * Data structure to hold modification time and table descriptor.
    */
@@ -126,23 +132,26 @@ public class FSTableDescriptors implements TableDescriptors {
    * This instance can do write operations (is not read only).
    */
   public FSTableDescriptors(final Configuration conf) throws IOException {
-    this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
+    this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
   }
 
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
-    this(fs, rootdir, false);
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
+      throws IOException {
+    this(conf, fs, rootdir, false);
   }
 
   /**
    * @param fsreadonly True if we are read-only when it comes to filesystem
    *          operations; i.e. on remove, we do not do delete in fs.
    */
-  public FSTableDescriptors(final FileSystem fs,
-      final Path rootdir, final boolean fsreadonly) {
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
+      final Path rootdir, final boolean fsreadonly) throws IOException {
     super();
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
+
+    this.metaTableDescriptor = TableDescriptor.metaTableDescriptor(conf);
   }
 
   /**
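Because each FSTableDescriptors instance now carries a configuration-derived meta descriptor, a Configuration must be threaded through all three constructors; that is what drives the long tail of call-site updates in the test hunks below. A sketch of the new call shape, with illustrative names:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class DescriptorsDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FSUtils.getCurrentFileSystem(conf);
        Path rootdir = FSUtils.getRootDir(conf);
        FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);
        // hbase:meta is served from the per-instance descriptor, not from the filesystem.
        HTableDescriptor meta = fstd.get(TableName.META_TABLE_NAME);
        System.out.println(meta.getTableName());
      }
    }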
@@ -158,7 +167,7 @@ public class FSTableDescriptors implements TableDescriptors {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED);
+      return new TableDescriptor(metaTableDescriptor, TableState.State.ENABLED);
     }
     // hbase:meta is already handled. If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt, throw an exception.
@@ -204,7 +213,7 @@ public class FSTableDescriptors implements TableDescriptors {
   public HTableDescriptor get(TableName tableName) throws IOException {
     if (TableName.META_TABLE_NAME.equals(tableName)) {
       cachehits++;
-      return HTableDescriptor.META_TABLEDESC;
+      return metaTableDescriptor;
     }
     TableDescriptor descriptor = getDescriptor(tableName);
     return descriptor == null ? null : descriptor.getHTableDescriptor();
@@ -1182,10 +1182,10 @@ public class HBaseFsck extends Configured {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    MasterFileSystem.setInfoFamilyCachingForMeta(false);
-    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c,
-        HTableDescriptor.META_TABLEDESC);
-    MasterFileSystem.setInfoFamilyCachingForMeta(true);
+    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
+    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor);
+    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
     return meta;
   }
 
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);
   private final Configuration conf;
+  private final FSTableDescriptors descriptors;
   private FileSystem fs;
   private HLog log;
   private HRegion metaRegion;
@@ -69,6 +71,7 @@ public class MetaUtils {
     this.conf = conf;
     conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     this.metaRegion = null;
+    this.descriptors = new FSTableDescriptors(conf);
     initialize();
   }
 
@@ -147,7 +150,7 @@ public class MetaUtils {
       return this.metaRegion;
     }
     this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-        HTableDescriptor.META_TABLEDESC, getLog(),
+        descriptors.get(TableName.META_TABLE_NAME), getLog(),
         this.conf);
     this.metaRegion.compactStores();
     return this.metaRegion;
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
@@ -74,6 +75,14 @@ public abstract class HBaseTestCase extends TestCase {
   protected final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
   public volatile Configuration conf = HBaseConfiguration.create();
+  public final FSTableDescriptors fsTableDescriptors;
+  {
+    try {
+      fsTableDescriptors = new FSTableDescriptors(conf);
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to init descriptors", e);
+    }
+  }
 
   /** constructor */
   public HBaseTestCase() {
@@ -630,8 +639,9 @@ public abstract class HBaseTestCase extends TestCase {
    * @throws IOException
    */
   protected void createMetaRegion() throws IOException {
+    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf, HTableDescriptor.META_TABLEDESC);
+        conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
   }
 
   protected void closeRootAndMeta() throws IOException {
@@ -94,6 +94,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.tool.Canary;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -368,6 +369,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return new Path(fs.getWorkingDirectory(), "test-data");
   }
 
+  /**
+   * @return META table descriptor
+   */
+  public HTableDescriptor getMetaTableDescriptor() {
+    try {
+      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to create META table descriptor", e);
+    }
+  }
+
   /**
    * @return Where the DFS cluster will write data on the local subsystem.
    * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
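Test code that previously reached for the static constant can route through the utility method above. A hedged JUnit-style sketch; the test class is illustrative and leans on hbase:meta having a single 'info' family:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.junit.Test;

    public class TestMetaDescriptorDefaults {
      private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @Test
      public void testMetaVersionsDefault() {
        HTableDescriptor meta = TEST_UTIL.getMetaTableDescriptor();
        // With no override, the catalog family keeps DEFAULT_HBASE_META_VERSIONS versions.
        assertEquals(HConstants.DEFAULT_HBASE_META_VERSIONS,
            meta.getColumnFamilies()[0].getMaxVersions());
      }
    }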
@@ -41,7 +41,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "newTable2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
     assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
@@ -54,7 +54,7 @@ public class TestFSTableDescriptorForceCreation {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     fstd.add(htd);
     assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
@@ -66,7 +66,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "createNewTableNew2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     fstd.createTableDescriptor(htd, false);
     assertTrue("Should create new table descriptor",
@@ -41,7 +41,7 @@ public class TestTableDescriptor {
 
   @Test
   public void testPb() throws DeserializationException, IOException {
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     final int v = 123;
     htd.setMaxFileSize(v);
     htd.setDurability(Durability.ASYNC_WAL);
@@ -1162,7 +1162,7 @@ public class TestAdmin {
   public void testCreateBadTables() throws IOException {
     String msg = null;
     try {
-      this.admin.createTable(HTableDescriptor.META_TABLEDESC);
+      this.admin.createTable(new HTableDescriptor(TableName.META_TABLE_NAME));
     } catch(TableExistsException e) {
       msg = e.toString();
     }
@@ -227,7 +227,7 @@ public class TestMasterFailover {
 
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = FSUtils.getRootDir(conf);
-    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir);
     fstd.createTableDescriptor(offlineTable);
 
     HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
@@ -69,10 +69,10 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = testDir;
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
+    fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024);
 
     HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-        rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
+        rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
     try {
       // Write rows for three tables 'A', 'B', and 'C'.
       for (char c = 'A'; c < 'D'; c++) {
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;
 import org.junit.Test;
@@ -63,13 +64,14 @@ public class TestHRegionInfo {
     HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
     Path basedir = htu.getDataTestDir();
     // Create a region.  That'll write the .regioninfo file.
+    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
     HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
-        HTableDescriptor.META_TABLEDESC);
+        fsTableDescriptors.get(TableName.META_TABLE_NAME));
     // Get modtime on the file.
     long modtime = getModTime(r);
     HRegion.closeHRegion(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, HTableDescriptor.META_TABLEDESC,
+    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
         null, htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
@@ -128,7 +128,7 @@ public class TestLogRollingNoCluster {
         edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
         this.wal.append(HRegionInfo.FIRST_META_REGIONINFO,
             TableName.META_TABLE_NAME,
-            edit, now, HTableDescriptor.META_TABLEDESC, sequenceId);
+            edit, now, TEST_UTIL.getMetaTableDescriptor(), sequenceId);
       }
       String msg = getName() + " finished";
       if (isException())
@@ -59,7 +59,7 @@ public class TestReplicationWALEntryFilters {
 
     // meta
     HLogKey key1 = new HLogKey(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-        HTableDescriptor.META_TABLEDESC.getTableName());
+        TableName.META_TABLE_NAME);
     HLog.Entry metaEntry = new Entry(key1, null);
 
     assertNull(filter.filter(metaEntry));
@@ -76,7 +76,7 @@ public class TestFSTableDescriptors {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(td));
     assertFalse(fstd.createTableDescriptor(td));
     FileStatus [] statuses = fs.listStatus(testdir);
@@ -98,7 +98,7 @@ public class TestFSTableDescriptors {
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
     TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     Path p0 = fstd.updateTableDescriptor(td);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
     Path p1 = fstd.updateTableDescriptor(td);
@@ -159,7 +159,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htds.add(htd);
     assertNotNull(htds.remove(htd.getTableName()));
@@ -172,7 +172,7 @@ public class TestFSTableDescriptors {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path rootdir = UTIL.getDataTestDir(name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     fstd.createTableDescriptor(td);
     TableDescriptor td2 =
         FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
@@ -183,14 +183,14 @@ public class TestFSTableDescriptors {
     final String name = "testReadingOldHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = UTIL.getDataTestDir(name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path descriptorFile = fstd.updateTableDescriptor(td);
     try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
       out.write(htd.toByteArray());
     }
-    FSTableDescriptors fstd2 = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     TableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
     assertEquals(td, td2);
     FileStatus descriptorFile2 =
@@ -209,7 +209,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any debris laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
+    FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
       @Override
       public HTableDescriptor get(TableName tablename)
           throws TableExistsException, FileNotFoundException, IOException {
@@ -256,7 +256,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     assertNull("There shouldn't be any HTD for this table",
       htds.get(TableName.valueOf("NoSuchTable")));
   }
@@ -267,7 +267,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htds.add(htd);
     htds.add(htd);
@@ -304,8 +304,8 @@ public class TestFSTableDescriptors {
   public void testReadingInvalidDirectoryFromFS() throws IOException {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     try {
       // .tmp dir is an invalid table name
-      new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
+      new FSTableDescriptors(UTIL.getConfiguration(), fs,
+          FSUtils.getRootDir(UTIL.getConfiguration()))
           .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY));
       fail("Shouldn't be able to read a table descriptor for the archive directory.");
     } catch (Exception e) {
@@ -321,7 +321,7 @@ public class TestFSTableDescriptors {
         "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(td));
     assertFalse(fstd.createTableDescriptor(td));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
@@ -2439,7 +2439,7 @@ public class TestHBaseFsck {
     LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
     Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
-    Path p = new Path(rootDir + "/" + HTableDescriptor.META_TABLEDESC.getNameAsString(),
+    Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
         hri.getEncodedName());
     Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
     fs.delete(hriPath, true);
@@ -2449,7 +2449,7 @@ public class TestHBaseFsck {
     LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
     Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
-    Path p = new Path(rootDir + "/" + HTableDescriptor.META_TABLEDESC.getNameAsString(),
+    Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
        hri.getEncodedName());
     HBaseFsck.debugLsr(conf, p);
     boolean success = fs.delete(p, true);
@@ -98,7 +98,7 @@ public class TestMergeTable {
 
     // Create regions and populate them at same time.  Create the tabledir
     // for them first.
-    new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
+    new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
@@ -161,7 +161,7 @@ public class TestMergeTable {
       throws IOException {
     HRegion meta =
       HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
-        UTIL.getConfiguration(), HTableDescriptor.META_TABLEDESC);
+        UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
     for (HRegion r: regions) {
       HRegion.addRegionToMETA(meta, r);
     }
@@ -147,7 +147,7 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(
+      new FSTableDescriptors(this.conf, this.fs, this.testDir).createTableDescriptor(
          new TableDescriptor(this.desc));
      /*
       * Create the regions we will merge