HBASE-12016 Reduce number of versions in Meta table. Make it configurable (Andrey Stepachev)
commit a8be606145
parent 4057f6c4e4
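
This commit removes the hard-coded HTableDescriptor.META_TABLEDESC and instead
builds the hbase:meta descriptor from configuration, so the number of versions
(and the block size) kept by the catalog table becomes tunable. A minimal usage
sketch, not part of the commit itself; it assumes the constants and the
TableDescriptor.metaTableDescriptor() factory introduced in the hunks below:

    Configuration conf = HBaseConfiguration.create();
    conf.setInt(HConstants.HBASE_META_VERSIONS, 10);           // default is 3
    conf.setInt(HConstants.HBASE_META_BLOCK_SIZE, 16 * 1024);  // default is 8 * 1024
    // metaTableDescriptor(conf) declares throws IOException
    HTableDescriptor meta = TableDescriptor.metaTableDescriptor(conf);
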
@@ -1261,7 +1261,11 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
         new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
   }
 
-  /** Table descriptor for <code>hbase:meta</code> catalog table */
+  /** Table descriptor for <code>hbase:meta</code> catalog table
+   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
+   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
+   */
+  @Deprecated
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       TableName.META_TABLE_NAME,
       new HColumnDescriptor[] {
@@ -2473,9 +2473,6 @@ class ConnectionManager {
     public HTableDescriptor getHTableDescriptor(final TableName tableName)
     throws IOException {
       if (tableName == null) return null;
-      if (tableName.equals(TableName.META_TABLE_NAME)) {
-        return HTableDescriptor.META_TABLEDESC;
-      }
       MasterKeepAliveConnection master = getKeepAliveMasterService();
       GetTableDescriptorsResponse htds;
       try {
@@ -37,7 +37,11 @@ public class TestHColumnDescriptor {
   @Test
   public void testPb() throws DeserializationException {
     HColumnDescriptor hcd = new HColumnDescriptor(
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()[0]);
+        new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+            .setInMemory(true)
+            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+            .setBloomFilterType(BloomType.NONE)
+            .setCacheDataInL1(true));
     final int v = 123;
     hcd.setBlocksize(v);
     hcd.setTimeToLive(v);
@@ -45,7 +45,7 @@ public class TestHTableDescriptor {
 
   @Test
   public void testPb() throws DeserializationException, IOException {
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     final int v = 123;
     htd.setMaxFileSize(v);
     htd.setDurability(Durability.ASYNC_WAL);
@@ -693,6 +693,26 @@ public final class HConstants {
    */
   public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
 
+  /**
+   * Parameter name for number of versions, kept by meta table.
+   */
+  public static String HBASE_META_VERSIONS = "hbase.meta.versions";
+
+  /**
+   * Default value of {@link #HBASE_META_VERSIONS}.
+   */
+  public static int DEFAULT_HBASE_META_VERSIONS = 3;
+
+  /**
+   * Parameter name for number of versions, kept by meta table.
+   */
+  public static String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
+
+  /**
+   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+   */
+  public static int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+
   /**
    * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
    * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
@@ -22,10 +22,12 @@ import java.io.IOException;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 
 /**
  * Class represents table state on HDFS.
@@ -151,4 +153,30 @@ public class TableDescriptor {
         ", tableState=" + tableState +
         '}';
   }
+
+  public static HTableDescriptor metaTableDescriptor(final Configuration conf)
+      throws IOException {
+    HTableDescriptor metaDescriptor = new HTableDescriptor(
+        TableName.META_TABLE_NAME,
+        new HColumnDescriptor[] {
+            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+                .setInMemory(true)
+                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+                // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+                .setBloomFilterType(BloomType.NONE)
+                // Enable cache of data blocks in L1 if more than one caching tier deployed:
+                // e.g. if using CombinedBlockCache (BucketCache).
+                .setCacheDataInL1(true)
+        }) {
+    };
+    metaDescriptor.addCoprocessor(
+        "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+        null, Coprocessor.PRIORITY_SYSTEM, null);
+    return metaDescriptor;
+  }
 }
@@ -457,8 +457,11 @@ public class MasterFileSystem {
 
     // Create tableinfo-s for hbase:meta if not already there.
     // assume, created table descriptor is for enabling table
-    new FSTableDescriptors(fs, rd).createTableDescriptor(
-        new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLING));
+    // meta table is a system table, so descriptors are predefined,
+    // we should get them from registry.
+    FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
+    fsd.createTableDescriptor(
+        new TableDescriptor(fsd.get(TableName.META_TABLE_NAME), TableState.State.ENABLING));
 
     return rd;
   }
@@ -498,10 +501,10 @@ public class MasterFileSystem {
       // not make it in first place.  Turn off block caching for bootstrap.
       // Enable after.
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-      setInfoFamilyCachingForMeta(false);
-      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
-          HTableDescriptor.META_TABLEDESC, null, true, true);
-      setInfoFamilyCachingForMeta(true);
+      HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+      setInfoFamilyCachingForMeta(metaDescriptor, false);
+      HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor);
+      setInfoFamilyCachingForMeta(metaDescriptor, true);
       HRegion.closeHRegion(meta);
     } catch (IOException e) {
       e = e instanceof RemoteException ?
@@ -514,9 +517,8 @@ public class MasterFileSystem {
   /**
    * Enable in memory caching for hbase:meta
    */
-  public static void setInfoFamilyCachingForMeta(final boolean b) {
-    for (HColumnDescriptor hcd:
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
+  public static void setInfoFamilyCachingForMeta(HTableDescriptor metaDescriptor, final boolean b) {
+    for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
       if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
         hcd.setBlockCacheEnabled(b);
         hcd.setInMemory(b);
@@ -386,8 +386,9 @@ public class NamespaceUpgrade implements Tool {
     HLog metaHLog = HLogFactory.createMetaHLog(fs, rootDir,
         metaLogName, conf, null,
         fakeServer.toString());
+    FSTableDescriptors fst = new FSTableDescriptors(conf);
     HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO,
-        HTableDescriptor.META_TABLEDESC, metaHLog, conf);
+        fst.get(TableName.META_TABLE_NAME), metaHLog, conf);
     HRegion region = null;
     try {
       for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) {
@@ -140,6 +140,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HashedBytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -5901,10 +5902,12 @@ public class HRegion implements HeapSize { // , Writable{
       final boolean majorCompact)
   throws IOException {
     HRegion region;
+    FSTableDescriptors fst = new FSTableDescriptors(c);
     // Currently expects tables have one region only.
     if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) {
       region = HRegion.newHRegion(p, log, fs, c,
-        HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null);
+        HRegionInfo.FIRST_META_REGIONINFO,
+        fst.get(TableName.META_TABLE_NAME), null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());
     }
@@ -493,7 +493,7 @@ public class HRegionServer extends HasThread implements
     boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
     this.fs = new HFileSystem(this.conf, useHBaseChecksum);
     this.rootDir = FSUtils.getRootDir(this.conf);
-    this.tableDescriptors = new FSTableDescriptors(
+    this.tableDescriptors = new FSTableDescriptors(this.conf,
       this.fs, this.rootDir, !canUpdateTableDescriptor());
 
     service = new ExecutorService(getServerName().toShortString());
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.DrainBarrier;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.Threads;
@@ -2052,9 +2053,11 @@ class FSHLog implements HLog, Syncable {
       WALEdit walEdit = new WALEdit();
       walEdit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"),
         Bytes.toBytes("qualifier"), -1, new byte [1000]));
+      FSTableDescriptors fst = new FSTableDescriptors(conf);
       for (AtomicLong i = new AtomicLong(0); i.get() < count; i.incrementAndGet()) {
-        wal.append(HRegionInfo.FIRST_META_REGIONINFO, TableName.META_TABLE_NAME, walEdit, start,
-          HTableDescriptor.META_TABLEDESC, i);
+        wal.append(HRegionInfo.FIRST_META_REGIONINFO,
+          TableName.META_TABLE_NAME, walEdit, start,
+          fst.get(TableName.META_TABLE_NAME), i);
         wal.sync();
       }
       wal.close();
@@ -2173,15 +2173,9 @@ public class AccessController extends BaseMasterAndRegionObserver
     else {
       MasterServices masterServices = ctx.getEnvironment().getMasterServices();
       for (TableName tableName: tableNamesList) {
-        // Do not deny if the table does not exist
-        try {
-          masterServices.checkTableModifiable(tableName);
-        } catch (TableNotFoundException ex) {
-          // Skip checks for a table that does not exist
+        // Skip checks for a table that does not exist
+        if (!masterServices.getTableStateManager().isTablePresent(tableName))
           continue;
-        } catch (TableNotDisabledException ex) {
-          // We don't care about this
-        }
         requirePermission("getTableDescriptors", tableName, null, null,
             Action.ADMIN, Action.CREATE);
       }
@@ -355,7 +355,7 @@ public class SnapshotManifest {
       Path rootDir = FSUtils.getRootDir(conf);
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(fs, rootDir)
+      new FSTableDescriptors(conf, fs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
             htd, TableState.State.ENABLED), false);
     } else {
@@ -28,6 +28,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.primitives.Ints;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,19 +41,18 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableInfoMissingException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableInfoMissingException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.primitives.Ints;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 
 /**
  * Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -91,6 +92,11 @@ public class FSTableDescriptors implements TableDescriptors {
   private final Map<TableName, TableDescriptorAndModtime> cache =
     new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
 
+  /**
+   * Table descriptor for <code>hbase:meta</code> catalog table
+   */
+  private final HTableDescriptor metaTableDescritor;
+
   /**
    * Data structure to hold modification time and table descriptor.
    */
@@ -126,23 +132,26 @@ public class FSTableDescriptors implements TableDescriptors {
    * This instance can do write operations (is not read only).
    */
  public FSTableDescriptors(final Configuration conf) throws IOException {
-    this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
+    this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
  }
 
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
-    this(fs, rootdir, false);
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
+      throws IOException {
+    this(conf, fs, rootdir, false);
  }
 
   /**
    * @param fsreadonly True if we are read-only when it comes to filesystem
    * operations; i.e. on remove, we do not do delete in fs.
    */
-  public FSTableDescriptors(final FileSystem fs,
-      final Path rootdir, final boolean fsreadonly) {
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
+      final Path rootdir, final boolean fsreadonly) throws IOException {
     super();
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
+
+    this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
   }
 
   /**
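
A sketch of the new call pattern (illustrative, not a hunk from this commit):
every direct construction of FSTableDescriptors must now supply a Configuration,
because the constructor builds the meta descriptor eagerly and may throw
IOException. Call sites then resolve the descriptor via get():

    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);
    HTableDescriptor metaDescriptor = fstd.get(TableName.META_TABLE_NAME);
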
@@ -158,7 +167,7 @@ public class FSTableDescriptors implements TableDescriptors {
     invocations++;
     if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED);
+      return new TableDescriptor(metaTableDescritor, TableState.State.ENABLED);
     }
     // hbase:meta is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
@@ -204,7 +213,7 @@ public class FSTableDescriptors implements TableDescriptors {
   public HTableDescriptor get(TableName tableName) throws IOException {
     if (TableName.META_TABLE_NAME.equals(tableName)) {
       cachehits++;
-      return HTableDescriptor.META_TABLEDESC;
+      return metaTableDescritor;
     }
     TableDescriptor descriptor = getDescriptor(tableName);
     return descriptor == null ? null : descriptor.getHTableDescriptor();
@@ -826,6 +835,6 @@ public class FSTableDescriptors implements TableDescriptors {
     Path p = writeTableDescriptor(fs, htd, tableDir, status);
     return p != null;
   }
 
 }
 
@@ -1182,10 +1182,10 @@ public class HBaseFsck extends Configured {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    MasterFileSystem.setInfoFamilyCachingForMeta(false);
-    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c,
-      HTableDescriptor.META_TABLEDESC);
-    MasterFileSystem.setInfoFamilyCachingForMeta(true);
+    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
+    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor);
+    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
     return meta;
   }
 
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);
   private final Configuration conf;
+  private final FSTableDescriptors descriptors;
   private FileSystem fs;
   private HLog log;
   private HRegion metaRegion;
@@ -69,6 +71,7 @@ public class MetaUtils {
     this.conf = conf;
     conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     this.metaRegion = null;
+    this.descriptors = new FSTableDescriptors(conf);
     initialize();
   }
 
@@ -147,7 +150,7 @@ public class MetaUtils {
       return this.metaRegion;
     }
     this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      HTableDescriptor.META_TABLEDESC, getLog(),
+      descriptors.get(TableName.META_TABLE_NAME), getLog(),
       this.conf);
     this.metaRegion.compactStores();
     return this.metaRegion;
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
@@ -74,6 +75,14 @@ public abstract class HBaseTestCase extends TestCase {
   protected final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
   public volatile Configuration conf = HBaseConfiguration.create();
+  public final FSTableDescriptors fsTableDescriptors;
+  {
+    try {
+      fsTableDescriptors = new FSTableDescriptors(conf);
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to init descriptors", e);
+    }
+  }
 
   /** constructor */
   public HBaseTestCase() {
@@ -630,8 +639,9 @@ public abstract class HBaseTestCase extends TestCase {
    * @throws IOException
    */
   protected void createMetaRegion() throws IOException {
+    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf, HTableDescriptor.META_TABLEDESC);
+        conf, fsTableDescriptors.get(TableName.META_TABLE_NAME) );
   }
 
   protected void closeRootAndMeta() throws IOException {
@@ -94,6 +94,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.tool.Canary;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -368,6 +369,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return new Path(fs.getWorkingDirectory(), "test-data");
   }
 
+  /**
+   * @return META table descriptor
+   */
+  public HTableDescriptor getMetaTableDescriptor() {
+    try {
+      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to create META table descriptor", e);
+    }
+  }
+
   /**
    * @return Where the DFS cluster will write data on the local subsystem.
    * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
@@ -41,7 +41,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "newTable2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
     assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
@@ -54,7 +54,7 @@ public class TestFSTableDescriptorForceCreation {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     fstd.add(htd);
     assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
@@ -66,7 +66,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "createNewTableNew2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     fstd.createTableDescriptor(htd, false);
     assertTrue("Should create new table descriptor",
@@ -41,7 +41,7 @@ public class TestTableDescriptor {
 
   @Test
   public void testPb() throws DeserializationException, IOException {
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     final int v = 123;
     htd.setMaxFileSize(v);
     htd.setDurability(Durability.ASYNC_WAL);
@@ -1162,7 +1162,7 @@ public class TestAdmin {
   public void testCreateBadTables() throws IOException {
     String msg = null;
     try {
-      this.admin.createTable(HTableDescriptor.META_TABLEDESC);
+      this.admin.createTable(new HTableDescriptor(TableName.META_TABLE_NAME));
     } catch(TableExistsException e) {
       msg = e.toString();
     }
@@ -227,7 +227,7 @@ public class TestMasterFailover {
 
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = FSUtils.getRootDir(conf);
-    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir);
     fstd.createTableDescriptor(offlineTable);
 
     HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
@@ -69,10 +69,10 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = testDir;
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
+    fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024);
 
     HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
+      rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
     try {
       // Write rows for three tables 'A', 'B', and 'C'.
       for (char c = 'A'; c < 'D'; c++) {
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;
 import org.junit.Test;
@@ -63,13 +64,14 @@ public class TestHRegionInfo {
     HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
     Path basedir = htu.getDataTestDir();
     // Create a region.  That'll write the .regioninfo file.
+    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
     HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
-        HTableDescriptor.META_TABLEDESC);
+        fsTableDescriptors.get(TableName.META_TABLE_NAME));
     // Get modtime on the file.
     long modtime = getModTime(r);
     HRegion.closeHRegion(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, HTableDescriptor.META_TABLEDESC,
+    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
       null, htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
@@ -128,7 +128,7 @@ public class TestLogRollingNoCluster {
         edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
         this.wal.append(HRegionInfo.FIRST_META_REGIONINFO,
             TableName.META_TABLE_NAME,
-            edit, now, HTableDescriptor.META_TABLEDESC, sequenceId);
+            edit, now, TEST_UTIL.getMetaTableDescriptor(), sequenceId);
       }
       String msg = getName() + " finished";
       if (isException())
@@ -59,7 +59,7 @@ public class TestReplicationWALEntryFilters {
 
     // meta
     HLogKey key1 = new HLogKey( HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-      HTableDescriptor.META_TABLEDESC.getTableName());
+      TableName.META_TABLE_NAME);
     HLog.Entry metaEntry = new Entry(key1, null);
 
     assertNull(filter.filter(metaEntry));
@@ -76,7 +76,7 @@ public class TestFSTableDescriptors {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(td));
     assertFalse(fstd.createTableDescriptor(td));
     FileStatus [] statuses = fs.listStatus(testdir);
@@ -98,7 +98,7 @@ public class TestFSTableDescriptors {
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
     TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     Path p0 = fstd.updateTableDescriptor(td);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
     Path p1 = fstd.updateTableDescriptor(td);
@@ -159,7 +159,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htds.add(htd);
     assertNotNull(htds.remove(htd.getTableName()));
@@ -172,7 +172,7 @@ public class TestFSTableDescriptors {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path rootdir = UTIL.getDataTestDir(name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     fstd.createTableDescriptor(td);
     TableDescriptor td2 =
       FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
@@ -183,14 +183,14 @@ public class TestFSTableDescriptors {
     final String name = "testReadingOldHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = UTIL.getDataTestDir(name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path descriptorFile = fstd.updateTableDescriptor(td);
     try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
       out.write(htd.toByteArray());
     }
-    FSTableDescriptors fstd2 = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     TableDescriptor td2 = fstd2.getDescriptor(htd.getTableName());
     assertEquals(td, td2);
     FileStatus descriptorFile2 =
@@ -209,7 +209,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any debris laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
+    FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
       @Override
       public HTableDescriptor get(TableName tablename)
           throws TableExistsException, FileNotFoundException, IOException {
@@ -256,7 +256,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     assertNull("There shouldn't be any HTD for this table",
       htds.get(TableName.valueOf("NoSuchTable")));
   }
@@ -267,7 +267,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htds.add(htd);
     htds.add(htd);
@@ -304,8 +304,8 @@ public class TestFSTableDescriptors {
   public void testReadingInvalidDirectoryFromFS() throws IOException {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     try {
-      // .tmp dir is an invalid table name
-      new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
+      new FSTableDescriptors(UTIL.getConfiguration(), fs,
+          FSUtils.getRootDir(UTIL.getConfiguration()))
           .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY));
       fail("Shouldn't be able to read a table descriptor for the archive directory.");
     } catch (Exception e) {
@@ -321,7 +321,7 @@ public class TestFSTableDescriptors {
         "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
     TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(td));
     assertFalse(fstd.createTableDescriptor(td));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
|
|
@ -2439,7 +2439,7 @@ public class TestHBaseFsck {
|
||||||
LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
|
LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
|
||||||
Path rootDir = FSUtils.getRootDir(conf);
|
Path rootDir = FSUtils.getRootDir(conf);
|
||||||
FileSystem fs = rootDir.getFileSystem(conf);
|
FileSystem fs = rootDir.getFileSystem(conf);
|
||||||
Path p = new Path(rootDir + "/" + HTableDescriptor.META_TABLEDESC.getNameAsString(),
|
Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
|
||||||
hri.getEncodedName());
|
hri.getEncodedName());
|
||||||
Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
|
Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
|
||||||
fs.delete(hriPath, true);
|
fs.delete(hriPath, true);
|
||||||
|
@ -2449,7 +2449,7 @@ public class TestHBaseFsck {
|
||||||
LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
|
LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
|
||||||
Path rootDir = FSUtils.getRootDir(conf);
|
Path rootDir = FSUtils.getRootDir(conf);
|
||||||
FileSystem fs = rootDir.getFileSystem(conf);
|
FileSystem fs = rootDir.getFileSystem(conf);
|
||||||
Path p = new Path(rootDir + "/" + HTableDescriptor.META_TABLEDESC.getNameAsString(),
|
Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
|
||||||
hri.getEncodedName());
|
hri.getEncodedName());
|
||||||
HBaseFsck.debugLsr(conf, p);
|
HBaseFsck.debugLsr(conf, p);
|
||||||
boolean success = fs.delete(p, true);
|
boolean success = fs.delete(p, true);
|
||||||
|
|
|
@@ -98,7 +98,7 @@ public class TestMergeTable {
 
     // Create regions and populate them at same time.  Create the tabledir
     // for them first.
-    new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
+    new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
@@ -161,7 +161,7 @@ public class TestMergeTable {
       throws IOException {
     HRegion meta =
       HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
-        UTIL.getConfiguration(), HTableDescriptor.META_TABLEDESC);
+        UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
     for (HRegion r: regions) {
       HRegion.addRegionToMETA(meta, r);
     }
@@ -147,7 +147,7 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(
+      new FSTableDescriptors(this.conf, this.fs, this.testDir).createTableDescriptor(
          new TableDescriptor(this.desc));
       /*
        * Create the regions we will merge
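
Summary sketch (illustrative, not a hunk from this commit): all former users of
HTableDescriptor.META_TABLEDESC now resolve the meta descriptor through
FSTableDescriptors, which consults the hbase.meta.* settings in the supplied
Configuration:

    FSTableDescriptors fsd = new FSTableDescriptors(conf);  // may throw IOException
    HTableDescriptor metaDescriptor = fsd.get(TableName.META_TABLE_NAME);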