HBASE-8408 Implement namespace

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1511577 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-08-08 04:19:49 +00:00
parent d363e852e6
commit 46fb7c409d
402 changed files with 17519 additions and 4325 deletions

View File

@ -51,5 +51,5 @@ public interface CoprocessorEnvironment {
* @return an interface for accessing the given table
* @throws IOException
*/
HTableInterface getTable(byte[] tableName) throws IOException;
HTableInterface getTable(TableName tableName) throws IOException;
}
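With no byte[] overload kept here, coprocessors convert legacy names at the call site. A minimal migration sketch (the env handle and table name are illustrative):

// Before this commit:
//   HTableInterface t = env.getTable(Bytes.toBytes("usertable"));
// After it, legacy byte[] names are wrapped in a TableName:
HTableInterface t = env.getTable(TableName.valueOf("usertable"));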

View File

@ -189,16 +189,15 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
private byte [] encodedNameAsBytes = null;
// Current TableName
private byte[] tableName = null;
private String tableNameAsString = null;
private TableName tableName = null;
/** HRegionInfo for root region */
public static final HRegionInfo ROOT_REGIONINFO =
new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
new HRegionInfo(0L, TableName.ROOT_TABLE_NAME);
/** HRegionInfo for first meta region */
public static final HRegionInfo FIRST_META_REGIONINFO =
new HRegionInfo(1L, Bytes.toBytes(".META."));
new HRegionInfo(1L, TableName.META_TABLE_NAME);
private void setHashCode() {
int result = Arrays.hashCode(this.regionName);
@ -206,7 +205,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
result ^= Arrays.hashCode(this.startKey);
result ^= Arrays.hashCode(this.endKey);
result ^= Boolean.valueOf(this.offLine).hashCode();
result ^= Arrays.hashCode(this.tableName);
result ^= Arrays.hashCode(this.tableName.getName());
this.hashCode = result;
}
@ -215,10 +214,10 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* Private constructor used when constructing HRegionInfo for the
* first meta regions
*/
private HRegionInfo(long regionId, byte[] tableName) {
private HRegionInfo(long regionId, TableName tableName) {
super();
this.regionId = regionId;
this.tableName = tableName.clone();
this.tableName = tableName;
// Note: first meta region names are still in the old format
this.regionName = createRegionName(tableName, null,
regionId, false);
@ -234,7 +233,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
super();
}
public HRegionInfo(final byte[] tableName) {
public HRegionInfo(final TableName tableName) {
this(tableName, null, null);
}
@ -246,7 +245,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* @param endKey end of key range
* @throws IllegalArgumentException
*/
public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey)
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey)
throws IllegalArgumentException {
this(tableName, startKey, endKey, false);
}
@ -262,7 +261,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* regions that may or may not hold references to this region.
* @throws IllegalArgumentException
*/
public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey,
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
final boolean split)
throws IllegalArgumentException {
this(tableName, startKey, endKey, split, System.currentTimeMillis());
@ -280,15 +279,15 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* @param regionid Region id to use.
* @throws IllegalArgumentException
*/
public HRegionInfo(final byte[] tableName, final byte[] startKey,
public HRegionInfo(final TableName tableName, final byte[] startKey,
final byte[] endKey, final boolean split, final long regionid)
throws IllegalArgumentException {
super();
if (tableName == null) {
throw new IllegalArgumentException("tableName cannot be null");
throw new IllegalArgumentException("TableName cannot be null");
}
this.tableName = tableName.clone();
this.tableName = tableName;
this.offLine = false;
this.regionId = regionid;
@ -299,7 +298,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
this.startKey = startKey == null?
HConstants.EMPTY_START_ROW: startKey.clone();
this.tableName = tableName.clone();
this.tableName = tableName;
setHashCode();
}
@ -332,7 +331,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* (such that it contains its encoded name?).
* @return Region name made of passed tableName, startKey and id
*/
public static byte [] createRegionName(final byte [] tableName,
public static byte [] createRegionName(final TableName tableName,
final byte [] startKey, final long regionid, boolean newFormat) {
return createRegionName(tableName, startKey, Long.toString(regionid), newFormat);
}
@ -346,7 +345,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* (such that it contains its encoded name?).
* @return Region name made of passed tableName, startKey and id
*/
public static byte [] createRegionName(final byte [] tableName,
public static byte [] createRegionName(final TableName tableName,
final byte [] startKey, final String id, boolean newFormat) {
return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat);
}
@ -360,14 +359,14 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* (such that it contains its encoded name?).
* @return Region name made of passed tableName, startKey and id
*/
public static byte [] createRegionName(final byte [] tableName,
public static byte [] createRegionName(final TableName tableName,
final byte [] startKey, final byte [] id, boolean newFormat) {
byte [] b = new byte [tableName.length + 2 + id.length +
byte [] b = new byte [tableName.getName().length + 2 + id.length +
(startKey == null? 0: startKey.length) +
(newFormat ? (MD5_HEX_LENGTH + 2) : 0)];
int offset = tableName.length;
System.arraycopy(tableName, 0, b, 0, offset);
int offset = tableName.getName().length;
System.arraycopy(tableName.getName(), 0, b, 0, offset);
b[offset++] = HConstants.DELIMITER;
if (startKey != null && startKey.length > 0) {
System.arraycopy(startKey, 0, b, offset, startKey.length);
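In the new format, the generated name is the table name, start key, and region id joined by delimiters, followed by the MD5-hex encoded name. A hedged sketch of a call (all values illustrative):

// Produces something like "t1,rowA,1375000000000.<md5-hex>."
byte[] regionName = HRegionInfo.createRegionName(
    TableName.valueOf("t1"), Bytes.toBytes("rowA"), 1375000000000L, true);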
@ -408,7 +407,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* @param regionName
* @return Table name.
*/
public static byte [] getTableName(byte [] regionName) {
public static TableName getTableName(byte[] regionName) {
int offset = -1;
for (int i = 0; i < regionName.length; i++) {
if (regionName[i] == HConstants.DELIMITER) {
@ -416,9 +415,9 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
break;
}
}
byte [] tableName = new byte[offset];
System.arraycopy(regionName, 0, tableName, 0, offset);
return tableName;
byte[] buff = new byte[offset];
System.arraycopy(regionName, 0, buff, 0, offset);
return TableName.valueOf(buff);
}
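Going the other direction, the parser above now hands back a TableName rather than raw bytes. A quick sketch, reusing the illustrative name from the previous example:

// Old-format name: everything before the first delimiter is the table.
TableName tn = HRegionInfo.getTableName(
    Bytes.toBytes("t1,rowA,1375000000000"));  // TableName for "t1"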
/**
@ -446,7 +445,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
}
}
if(offset == -1) throw new IOException("Invalid regionName format");
byte [] tableName = new byte[offset];
byte[] tableName = new byte[offset];
System.arraycopy(regionName, 0, tableName, 0, offset);
offset = -1;
for (int i = regionName.length - 1; i > 0; i--) {
@ -529,24 +528,13 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* Get current table name of the region
* @return TableName
*/
public byte[] getTableName() {
if (tableName == null || tableName.length == 0) {
public TableName getTableName() {
if (tableName == null || tableName.getName().length == 0) {
tableName = getTableName(getRegionName());
}
return tableName;
}
/**
* Get current table name as string
* @return string representation of current table
*/
public String getTableNameAsString() {
if (tableNameAsString == null) {
tableNameAsString = Bytes.toString(tableName);
}
return tableNameAsString;
}
/**
* Returns true if the given inclusive range of rows is fully contained
* by this region. For example, if the region is foo,a,g and this is
@ -586,7 +574,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
/** @return true if this region is a meta region */
public boolean isMetaRegion() {
return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName());
return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTableName());
}
/**
@ -690,7 +678,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
Bytes.writeByteArray(out, regionName);
out.writeBoolean(split);
Bytes.writeByteArray(out, startKey);
Bytes.writeByteArray(out, tableName);
Bytes.writeByteArray(out, tableName.getName());
out.writeInt(hashCode);
}
@ -717,7 +705,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
try {
HTableDescriptor htd = new HTableDescriptor();
htd.readFields(in);
this.tableName = htd.getName();
this.tableName = htd.getTableName();
} catch(EOFException eofe) {
throw new IOException("HTD not found in input buffer", eofe);
}
@ -730,7 +718,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
this.regionNameStr = Bytes.toStringBinary(this.regionName);
this.split = in.readBoolean();
this.startKey = Bytes.readByteArray(in);
this.tableName = Bytes.readByteArray(in);
this.tableName = TableName.valueOf(Bytes.readByteArray(in));
this.hashCode = in.readInt();
} else {
throw new IOException("Non-migratable/unknown version=" + getVersion());
@ -762,7 +750,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
}
// Are regions of same table?
int result = Bytes.compareTo(this.tableName, o.tableName);
int result = this.tableName.compareTo(o.tableName);
if (result != 0) {
return result;
}
@ -829,7 +817,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
public static RegionInfo convert(final HRegionInfo info) {
if (info == null) return null;
RegionInfo.Builder builder = RegionInfo.newBuilder();
builder.setTableName(ByteString.copyFrom(info.getTableName()));
builder.setTableName(ProtobufUtil.toProtoTableName(info.getTableName()));
builder.setRegionId(info.getRegionId());
if (info.getStartKey() != null) {
builder.setStartKey(ByteString.copyFrom(info.getStartKey()));
@ -846,12 +834,13 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* Convert a RegionInfo to a HRegionInfo
*
* @param proto the RegionInfo to convert
* @return the converted HRegionInfo
*/
public static HRegionInfo convert(final RegionInfo proto) {
if (proto == null) return null;
byte [] tableName = proto.getTableName().toByteArray();
if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
TableName tableName =
ProtobufUtil.toTableName(proto.getTableName());
if (tableName.equals(TableName.META_TABLE_NAME)) {
return FIRST_META_REGIONINFO;
}
long regionId = proto.getRegionId();
@ -867,7 +856,10 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
if (proto.hasSplit()) {
split = proto.getSplit();
}
HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId);
HRegionInfo hri = new HRegionInfo(
tableName,
startKey,
endKey, split, regionId);
if (proto.hasOffline()) {
hri.setOffline(proto.getOffline());
}
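Taken together, region metadata is now created and queried through TableName end to end. A hedged sketch under the new signatures (table and keys illustrative):

TableName tn = TableName.valueOf("usertable");
HRegionInfo hri = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("g"));
TableName back = hri.getTableName();  // formerly returned byte[]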

View File

@ -78,9 +78,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
private static final byte TABLE_DESCRIPTOR_VERSION = 7;
private byte [] name = HConstants.EMPTY_BYTE_ARRAY;
private String nameAsString = "";
private TableName name = null;
/**
* A map which holds the metadata information of the table. This metadata
@ -260,10 +258,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* <em> INTERNAL </em> Private constructor used internally to create table descriptors for
* catalog tables, <code>.META.</code> and <code>-ROOT-</code>.
*/
protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) {
this.name = name.clone();
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) {
setName(name);
for(HColumnDescriptor descriptor : families) {
this.families.put(descriptor.getName(), descriptor);
}
@ -273,11 +269,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* <em> INTERNAL </em> Private constructor used internally to create table descriptors for
* catalog tables, <code>.META.</code> and <code>-ROOT-</code>.
*/
protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
this.name = name.clone();
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
setName(name);
for(HColumnDescriptor descriptor : families) {
this.families.put(descriptor.getName(), descriptor);
}
@ -290,7 +284,6 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Default constructor which constructs an empty object.
* For deserializing an HTableDescriptor instance only.
* @see #HTableDescriptor(byte[])
* @deprecated Used by Writables and Writables are going away.
*/
@Deprecated
@ -299,30 +292,33 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Construct a table descriptor specifying table name.
* Construct a table descriptor specifying a TableName object
* @param name Table name.
* @throws IllegalArgumentException if passed a table name
* that is made of other than 'word' characters, underscore or period: i.e.
* <code>[a-zA-Z_0-9.]</code>.
* @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
*/
public HTableDescriptor(final String name) {
this(Bytes.toBytes(name));
public HTableDescriptor(final TableName name) {
super();
setName(name);
}
/**
* Construct a table descriptor specifying a byte array table name
* @param name - Table name as a byte array.
* @throws IllegalArgumentException if passed a table name
* that is made of other than 'word' characters, underscore or period: i.e.
* <code>[a-zA-Z_0-9-.]</code>.
* @param name Table name.
* @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
*/
public HTableDescriptor(final byte [] name) {
super();
setMetaFlags(this.name);
this.name = this.isMetaRegion()? name: isLegalTableName(name);
this.nameAsString = Bytes.toString(this.name);
@Deprecated
public HTableDescriptor(final byte[] name) {
this(TableName.valueOf(name));
}
/**
* Construct a table descriptor specifying a String table name
* @param name Table name.
* @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
*/
@Deprecated
public HTableDescriptor(final String name) {
this(TableName.valueOf(name));
}
/**
@ -334,8 +330,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
public HTableDescriptor(final HTableDescriptor desc) {
super();
this.name = desc.name.clone();
this.nameAsString = Bytes.toString(this.name);
setName(desc.name);
setMetaFlags(this.name);
for (HColumnDescriptor c: desc.families.values()) {
this.families.put(c.getName(), new HColumnDescriptor(c));
@ -356,10 +351,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* Called by constructors.
* @param name
*/
private void setMetaFlags(final byte [] name) {
setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME));
private void setMetaFlags(final TableName name) {
setMetaRegion(isRootRegion() ||
Bytes.equals(name, HConstants.META_TABLE_NAME));
name.equals(TableName.META_TABLE_NAME));
}
/**
@ -387,10 +381,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Checks if this table is either <code> -ROOT- </code> or <code> .META. </code>
* Checks if this table is <code> .META. </code>
* region.
*
* @return true if this is either a <code> -ROOT- </code> or <code> .META. </code>
* @return true if this table is <code> .META. </code>
* region
*/
public boolean isMetaRegion() {
@ -436,56 +430,15 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
/**
* Checks if the tableName being passed represents either
* <code> -ROOT- </code> or <code> .META. </code>
* Checks if the tableName being passed is a system table
*
* @return true if a tablesName is either <code> -ROOT- </code>
* or <code> .META. </code>
*
* @return true if a tableName is a member of the system
* namespace (aka hbase)
*/
public static boolean isMetaTable(final byte [] tableName) {
return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) ||
Bytes.equals(tableName, HConstants.META_TABLE_NAME);
}
// A non-capture group so that this can be embedded.
public static final String VALID_USER_TABLE_REGEX = "(?:[a-zA-Z_0-9][a-zA-Z_0-9.-]*)";
/**
* Check passed byte buffer, "tableName", is legal user-space table name.
* @return Returns passed <code>tableName</code> param
* @throws NullPointerException If passed <code>tableName</code> is null
* @throws IllegalArgumentException if passed a tableName
* that is made of other than 'word' characters or underscores: i.e.
* <code>[a-zA-Z_0-9]</code>.
*/
public static byte [] isLegalTableName(final byte [] tableName) {
if (tableName == null || tableName.length <= 0) {
throw new IllegalArgumentException("Name is null or empty");
}
if (tableName[0] == '.' || tableName[0] == '-') {
throw new IllegalArgumentException("Illegal first character <" + tableName[0] +
"> at 0. User-space table names can only start with 'word " +
"characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(tableName));
}
if (HConstants.CLUSTER_ID_FILE_NAME.equalsIgnoreCase(Bytes
.toString(tableName))
|| HConstants.SPLIT_LOGDIR_NAME.equalsIgnoreCase(Bytes
.toString(tableName))
|| HConstants.VERSION_FILE_NAME.equalsIgnoreCase(Bytes
.toString(tableName))) {
throw new IllegalArgumentException(Bytes.toString(tableName)
+ " conflicted with system reserved words");
}
for (int i = 0; i < tableName.length; i++) {
if (Character.isLetterOrDigit(tableName[i]) || tableName[i] == '_' ||
tableName[i] == '-' || tableName[i] == '.') {
continue;
}
throw new IllegalArgumentException("Illegal character <" + tableName[i] +
"> at " + i + ". User-space table names can only contain " +
"'word characters': i.e. [a-zA-Z_0-9-.]: " + Bytes.toString(tableName));
}
return tableName;
public static boolean isSystemTable(final TableName tableName) {
return tableName.getNamespaceAsString()
.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
}
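The character-level validation that isLegalTableName used to do now happens inside TableName itself, and the meta/root special case becomes a namespace check. A small sketch (table name illustrative):

// A plain name resolves to the default namespace, so this is false;
// only tables in the reserved system namespace ("hbase") return true.
boolean sys = HTableDescriptor.isSystemTable(TableName.valueOf("usertable"));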
/**
@ -709,13 +662,22 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
return this.durability;
}
/**
* Get the name of the table
*
* @return TableName
*/
public TableName getTableName() {
return name;
}
/**
* Get the name of the table as a byte array.
*
* @return name of table
*/
public byte [] getName() {
return name;
public byte[] getName() {
return name.getName();
}
/**
@ -724,7 +686,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @return name of table as a String
*/
public String getNameAsString() {
return this.nameAsString;
return name.getNameAsString();
}
/**
@ -744,9 +706,14 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*
* @param name name of table
*/
@Deprecated
public void setName(byte[] name) {
setName(TableName.valueOf(name));
}
@Deprecated
public void setName(TableName name) {
this.name = name;
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(this.name);
}
@ -839,7 +806,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
@Override
public String toString() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name)).append('\'');
s.append('\'').append(Bytes.toString(name.getName())).append('\'');
s.append(getValues(true));
for (HColumnDescriptor f : families.values()) {
s.append(", ").append(f);
@ -853,7 +820,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
public String toStringCustomizedValues() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name)).append('\'');
s.append('\'').append(Bytes.toString(name.getName())).append('\'');
s.append(getValues(false));
for(HColumnDescriptor hcd : families.values()) {
s.append(", ").append(hcd.toStringCustomizedValues());
@ -978,7 +945,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
@Override
public int hashCode() {
int result = Bytes.hashCode(this.name);
int result = this.name.hashCode();
result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
if (this.families != null && this.families.size() > 0) {
for (HColumnDescriptor e: this.families.values()) {
@ -1002,8 +969,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
if (version < 3)
throw new IOException("versions < 3 are not supported (and never existed!?)");
// version 3+
name = Bytes.readByteArray(in);
nameAsString = Bytes.toString(this.name);
name = TableName.valueOf(Bytes.readByteArray(in));
setRootRegion(in.readBoolean());
setMetaRegion(in.readBoolean());
values.clear();
@ -1046,8 +1012,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
@Deprecated
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(TABLE_DESCRIPTOR_VERSION);
Bytes.writeByteArray(out, name);
out.writeInt(TABLE_DESCRIPTOR_VERSION);
Bytes.writeByteArray(out, name.toBytes());
out.writeBoolean(isRootRegion());
out.writeBoolean(isMetaRegion());
out.writeInt(values.size());
@ -1080,7 +1046,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
@Override
public int compareTo(final HTableDescriptor other) {
int result = Bytes.compareTo(this.name, other.name);
int result = this.name.compareTo(other.name);
if (result == 0) {
result = families.size() - other.families.size();
}
@ -1350,17 +1316,24 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* Returns the {@link Path} object representing the table directory under
* path rootdir
*
* Deprecated: use FSUtils.getTableDir() instead.
*
* @param rootdir qualified path of HBase root directory
* @param tableName name of table
* @return {@link Path} for table
*/
@Deprecated
public static Path getTableDir(Path rootdir, final byte [] tableName) {
return new Path(rootdir, Bytes.toString(tableName));
//This is bad: we have to mirror code from FSUtils.getTableDir since
//there is no module dependency between hbase-client and hbase-server
TableName name = TableName.valueOf(tableName);
return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
}
/** Table descriptor for <code>-ROOT-</code> catalog table */
public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
HConstants.ROOT_TABLE_NAME,
TableName.ROOT_TABLE_NAME,
new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.CATALOG_FAMILY)
// Ten is arbitrary number. Keep versions to help debugging.
@ -1373,7 +1346,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/** Table descriptor for <code>.META.</code> catalog table */
public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
TableName.META_TABLE_NAME,
new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.CATALOG_FAMILY)
// Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
@ -1395,6 +1369,21 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}
}
public final static String NAMESPACE_FAMILY_INFO = "info";
public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
/** Table descriptor for namespace table */
public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor(
TableName.NAMESPACE_TABLE_NAME,
new HColumnDescriptor[] {
new HColumnDescriptor(NAMESPACE_FAMILY_INFO)
// Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
});
@Deprecated
public void setOwner(User owner) {
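The namespace table descriptor above gives the new catalog table one row per namespace, with the serialized descriptor kept in the single info family under column d. A hedged sketch of building a descriptor to store there, assuming the NamespaceDescriptor builder this commit introduces elsewhere:

NamespaceDescriptor ns = NamespaceDescriptor.create("my_ns")
    .addConfiguration("a.property", "a.value")  // optional metadata
    .build();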
@ -1458,7 +1447,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
public TableSchema convert() {
TableSchema.Builder builder = TableSchema.newBuilder();
builder.setName(ByteString.copyFrom(getName()));
builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
aBuilder.setFirst(ByteString.copyFrom(e.getKey().get()));
@ -1488,7 +1477,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
for (ColumnFamilySchema cfs: list) {
hcds[index++] = HColumnDescriptor.convert(cfs);
}
HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds);
HTableDescriptor htd = new HTableDescriptor(
ProtobufUtil.toTableName(ts.getTableName()),
hcds);
for (BytesBytesPair a: ts.getAttributesList()) {
htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
}
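For table creation, the TableName constructor is now the primary path and the String/byte[] forms are shims over TableName.valueOf. A minimal sketch (names illustrative):

HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("usertable"));
htd.addFamily(new HColumnDescriptor("cf"));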

View File

@ -16,6 +16,7 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
/**
* Thrown when a table exists but should not
@ -37,4 +38,8 @@ public class TableExistsException extends DoNotRetryIOException {
public TableExistsException(String s) {
super(s);
}
public TableExistsException(TableName t) {
this(t.getNameAsString());
}
}

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
/**
@ -48,4 +49,11 @@ public class TableNotDisabledException extends DoNotRetryIOException {
public TableNotDisabledException(byte[] tableName) {
this(Bytes.toString(tableName));
}
/**
* @param tableName Name of table that is not disabled
*/
public TableNotDisabledException(TableName tableName) {
this(tableName.getNameAsString());
}
}

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
@ -43,6 +44,13 @@ public class TableNotEnabledException extends DoNotRetryIOException {
super(s);
}
/**
* @param tableName Name of table that is not enabled
*/
public TableNotEnabledException(TableName tableName) {
this(tableName.getNameAsString());
}
/**
* @param tableName Name of table that is not enabled
*/

View File

@ -20,6 +20,8 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
/** Thrown when a table can not be located */
@InterfaceAudience.Public
@ -36,4 +38,12 @@ public class TableNotFoundException extends DoNotRetryIOException {
public TableNotFoundException(String s) {
super(s);
}
public TableNotFoundException(byte[] tableName) {
super(Bytes.toString(tableName));
}
public TableNotFoundException(TableName tableName) {
super(tableName.getNameAsString());
}
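Each of these table-level exceptions gains a TableName constructor so call sites stop stringifying names by hand; a one-line sketch:

// Equivalent to: new TableNotFoundException(tn.getNameAsString())
throw new TableNotFoundException(TableName.valueOf("usertable"));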
}

View File

@ -21,6 +21,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@ -72,7 +73,7 @@ public class MetaReader {
* @throws IOException
*/
public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker, final Set<String> disabledTables)
CatalogTracker catalogTracker, final Set<TableName> disabledTables)
throws IOException {
return fullScan(catalogTracker, disabledTables, false);
}
@ -90,7 +91,7 @@ public class MetaReader {
* @throws IOException
*/
public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker, final Set<String> disabledTables,
CatalogTracker catalogTracker, final Set<TableName> disabledTables,
final boolean excludeOfflinedSplitParents)
throws IOException {
final Map<HRegionInfo, ServerName> regions =
@ -102,9 +103,9 @@ public class MetaReader {
Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
HRegionInfo hri = region.getFirst();
if (hri == null) return true;
if (hri.getTableNameAsString() == null) return true;
if (hri.getTableName() == null) return true;
if (disabledTables.contains(
hri.getTableNameAsString())) return true;
hri.getTableName())) return true;
// Are we to include split parents in the list?
if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
regions.put(hri, region.getSecond());
@ -159,7 +160,7 @@ public class MetaReader {
* @throws IOException
*/
private static HTable getHTable(final CatalogTracker catalogTracker,
final byte [] tableName)
final TableName tableName)
throws IOException {
// Passing the CatalogTracker's connection configuration ensures this
// HTable instance uses the CatalogTracker's connection.
@ -187,7 +188,7 @@ public class MetaReader {
*/
static HTable getMetaHTable(final CatalogTracker ct)
throws IOException {
return getHTable(ct, HConstants.META_TABLE_NAME);
return getHTable(ct, TableName.META_TABLE_NAME);
}
/**
@ -274,13 +275,12 @@ public class MetaReader {
* @throws IOException
*/
public static boolean tableExists(CatalogTracker catalogTracker,
String tableName)
final TableName tableName)
throws IOException {
if (tableName.equals(HTableDescriptor.META_TABLEDESC.getNameAsString())) {
if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
// Catalog tables always exist.
return true;
}
final byte [] tableNameBytes = Bytes.toBytes(tableName);
// Make a version of ResultCollectingVisitor that only collects the first
CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
private HRegionInfo current = null;
@ -293,7 +293,7 @@ public class MetaReader {
LOG.warn("No serialized HRegionInfo in " + r);
return true;
}
if (!isInsideTable(this.current, tableNameBytes)) return false;
if (!isInsideTable(this.current, tableName)) return false;
// Else call super and add this Result to the collection.
super.visit(r);
// Stop collecting regions from table after we get one.
@ -306,7 +306,7 @@ public class MetaReader {
this.results.add(this.current);
}
};
fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes));
fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName));
// If visitor has results >= 1 then table exists.
return visitor.getResults().size() >= 1;
}
@ -319,7 +319,7 @@ public class MetaReader {
* @throws IOException
*/
public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker,
byte [] tableName)
TableName tableName)
throws IOException {
return getTableRegions(catalogTracker, tableName, false);
}
@ -334,7 +334,7 @@ public class MetaReader {
* @throws IOException
*/
public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker,
byte [] tableName, final boolean excludeOfflinedSplitParents)
TableName tableName, final boolean excludeOfflinedSplitParents)
throws IOException {
List<Pair<HRegionInfo, ServerName>> result = null;
try {
@ -361,8 +361,8 @@ public class MetaReader {
* @return True if <code>current</code> tablename is equal to
* <code>tableName</code>
*/
static boolean isInsideTable(final HRegionInfo current, final byte [] tableName) {
return Bytes.equals(tableName, current.getTableName());
static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
return tableName.equals(current.getTableName());
}
/**
@ -370,9 +370,9 @@ public class MetaReader {
* @return Place to start Scan in <code>.META.</code> when passed a
* <code>tableName</code>; returns &lt;tableName&gt; &lt;,&gt; &lt;,&gt;
*/
static byte [] getTableStartRowForMeta(final byte [] tableName) {
byte [] startRow = new byte[tableName.length + 2];
System.arraycopy(tableName, 0, startRow, 0, tableName.length);
static byte [] getTableStartRowForMeta(TableName tableName) {
byte [] startRow = new byte[tableName.getName().length + 2];
System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
startRow[startRow.length - 2] = HConstants.DELIMITER;
startRow[startRow.length - 1] = HConstants.DELIMITER;
return startRow;
@ -387,8 +387,8 @@ public class MetaReader {
* @param tableName the table's name
* @return configured Scan object
*/
public static Scan getScanForTableName(byte[] tableName) {
String strName = Bytes.toString(tableName);
public static Scan getScanForTableName(TableName tableName) {
String strName = tableName.getNameAsString();
// Start key is just the table name with delimiters
byte[] startKey = Bytes.toBytes(strName + ",,");
// Stop key appends the smallest possible char to the table name
@ -407,9 +407,9 @@ public class MetaReader {
* @throws InterruptedException
*/
public static List<Pair<HRegionInfo, ServerName>>
getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName)
getTableRegionsAndLocations(CatalogTracker catalogTracker, TableName tableName)
throws IOException, InterruptedException {
return getTableRegionsAndLocations(catalogTracker, Bytes.toBytes(tableName),
return getTableRegionsAndLocations(catalogTracker, tableName,
true);
}
@ -422,9 +422,9 @@ public class MetaReader {
*/
public static List<Pair<HRegionInfo, ServerName>>
getTableRegionsAndLocations(final CatalogTracker catalogTracker,
final byte [] tableName, final boolean excludeOfflinedSplitParents)
final TableName tableName, final boolean excludeOfflinedSplitParents)
throws IOException, InterruptedException {
if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
if (tableName.equals(TableName.META_TABLE_NAME)) {
// If meta, do a bit of special handling.
ServerName serverName = catalogTracker.getMetaLocation();
List<Pair<HRegionInfo, ServerName>> list =
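Catalog lookups follow the same shift. A hedged sketch against this internal API, where ct is assumed to be an already-started CatalogTracker:

TableName tn = TableName.valueOf("usertable");
if (MetaReader.tableExists(ct, tn)) {                    // throws IOException
  List<HRegionInfo> regions = MetaReader.getTableRegions(ct, tn);
}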

View File

@ -23,6 +23,9 @@ package org.apache.hadoop.hbase.client;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.DoNotRetryIOException;
@ -85,7 +88,7 @@ import java.util.concurrent.atomic.AtomicLong;
class AsyncProcess<CResult> {
private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
protected final HConnection hConnection;
protected final byte[] tableName;
protected final TableName tableName;
protected final ExecutorService pool;
protected final AsyncProcessCallback<CResult> callback;
protected final BatchErrors errors = new BatchErrors();
@ -167,7 +170,7 @@ class AsyncProcess<CResult> {
}
}
public AsyncProcess(HConnection hc, byte[] tableName, ExecutorService pool,
public AsyncProcess(HConnection hc, TableName tableName, ExecutorService pool,
AsyncProcessCallback<CResult> callback, Configuration conf,
RpcRetryingCallerFactory rpcCaller) {
this.hConnection = hc;
@ -288,7 +291,7 @@ class AsyncProcess<CResult> {
loc = hConnection.locateRegion(this.tableName, row.getRow());
if (loc == null) {
locationException = new IOException("No location found, aborting submit for" +
" tableName=" + Bytes.toString(tableName) +
" tableName=" + tableName +
" rowkey=" + Arrays.toString(row.getRow()));
}
} catch (IOException e) {
@ -530,7 +533,7 @@ class AsyncProcess<CResult> {
if (toReplay.isEmpty()) {
LOG.warn("Attempt #" + numAttempt + "/" + numTries + " failed for all (" +
initialActions.size() + ") operations on server " + location.getServerName() +
" NOT resubmitting, tableName=" + Bytes.toString(tableName) + ", location=" + location);
" NOT resubmitting, tableName=" + tableName + ", location=" + location);
} else {
submit(initialActions, toReplay, numAttempt, true, errorsByServer);
}
@ -553,7 +556,7 @@ class AsyncProcess<CResult> {
if (responses == null) {
LOG.info("Attempt #" + numAttempt + "/" + numTries + " failed for all operations" +
" on server " + location.getServerName() + " , trying to resubmit," +
" tableName=" + Bytes.toString(tableName) + ", location=" + location);
" tableName=" + tableName + ", location=" + location);
resubmitAll(initialActions, rsActions, location, numAttempt + 1, null, errorsByServer);
return;
}
@ -614,7 +617,7 @@ class AsyncProcess<CResult> {
// logs as errors are to be expected when regions move, split and so on
LOG.debug("Attempt #" + numAttempt + "/" + numTries + " failed for " + failureCount +
" operations on server " + location.getServerName() + ", resubmitting " +
toReplay.size() + ", tableName=" + Bytes.toString(tableName) + ", location=" +
toReplay.size() + ", tableName=" + tableName + ", location=" +
location + ", last exception was: " + throwable +
" - sleeping " + backOffTime + " ms.");
}
@ -622,7 +625,7 @@ class AsyncProcess<CResult> {
Thread.sleep(backOffTime);
} catch (InterruptedException e) {
LOG.warn("Not sent: " + toReplay.size() +
" operations, tableName=" + Bytes.toString(tableName) + ", location=" + location, e);
" operations, tableName=" + tableName + ", location=" + location, e);
Thread.interrupted();
return;
}
@ -631,7 +634,7 @@ class AsyncProcess<CResult> {
} else if (failureCount != 0) {
LOG.warn("Attempt #" + numAttempt + "/" + numTries + " failed for " + failureCount +
" operations on server " + location.getServerName() + " NOT resubmitting." +
", tableName=" + Bytes.toString(tableName) + ", location=" + location);
", tableName=" + tableName + ", location=" + location);
}
}
@ -648,7 +651,7 @@ class AsyncProcess<CResult> {
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted." +
" currentNumberOfTask=" + currentNumberOfTask +
", tableName=" + Bytes.toString(tableName) + ", tasksDone=" + tasksDone.get());
", tableName=" + tableName + ", tasksDone=" + tasksDone.get());
}
}
}
@ -666,7 +669,7 @@ class AsyncProcess<CResult> {
lastLog = now;
LOG.info(": Waiting for the global number of running tasks to be equals or less than "
+ max + ", tasksSent=" + tasksSent.get() + ", tasksDone=" + tasksDone.get() +
", currentTasksDone=" + currentTasksDone + ", tableName=" + Bytes.toString(tableName));
", currentTasksDone=" + currentTasksDone + ", tableName=" + tableName);
}
waitForNextTaskDone(currentTasksDone);
currentTasksDone = this.tasksDone.get();

View File

@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@ -63,7 +64,7 @@ public class ClientScanner extends AbstractClientScanner {
private ScanMetrics scanMetrics = null;
private final long maxScannerResultSize;
private final HConnection connection;
private final byte[] tableName;
private final TableName tableName;
private final int scannerTimeout;
private boolean scanMetricsPublished = false;
private RpcRetryingCaller<Result []> caller;
@ -79,7 +80,7 @@ public class ClientScanner extends AbstractClientScanner {
* @throws IOException
*/
public ClientScanner(final Configuration conf, final Scan scan,
final byte[] tableName) throws IOException {
final TableName tableName) throws IOException {
this(conf, scan, tableName, HConnectionManager.getConnection(conf));
}
@ -94,7 +95,7 @@ public class ClientScanner extends AbstractClientScanner {
* @param connection Connection identifying the cluster
* @throws IOException
*/
public ClientScanner(final Configuration conf, final Scan scan, final byte[] tableName,
public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
HConnection connection) throws IOException {
this(conf, scan, tableName, connection, new RpcRetryingCallerFactory(conf));
}
@ -108,10 +109,10 @@ public class ClientScanner extends AbstractClientScanner {
* @param connection Connection identifying the cluster
* @throws IOException
*/
public ClientScanner(final Configuration conf, final Scan scan, final byte[] tableName,
public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
HConnection connection, RpcRetryingCallerFactory rpcFactory) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Scan table=" + Bytes.toString(tableName)
LOG.trace("Scan table=" + tableName
+ ", startRow=" + Bytes.toStringBinary(scan.getStartRow()));
}
this.scan = scan;
@ -156,7 +157,7 @@ public class ClientScanner extends AbstractClientScanner {
return this.connection;
}
protected byte[] getTableName() {
protected TableName getTableName() {
return this.tableName;
}
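Scanner construction moves over as well. A hedged sketch, with conf assumed to be an existing Configuration:

Scan scan = new Scan();
ClientScanner scanner =
    new ClientScanner(conf, scan, TableName.valueOf("usertable"));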

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
@ -136,6 +137,9 @@ public interface HConnection extends Abortable, Closeable {
* @return true if the table is enabled, false otherwise
* @throws IOException if a remote or network exception occurs
*/
boolean isTableEnabled(TableName tableName) throws IOException;
@Deprecated
boolean isTableEnabled(byte[] tableName) throws IOException;
/**
@ -143,6 +147,9 @@ public interface HConnection extends Abortable, Closeable {
* @return true if the table is disabled, false otherwise
* @throws IOException if a remote or network exception occurs
*/
boolean isTableDisabled(TableName tableName) throws IOException;
@Deprecated
boolean isTableDisabled(byte[] tableName) throws IOException;
/**
@ -150,6 +157,9 @@ public interface HConnection extends Abortable, Closeable {
* @return true if all regions of the table are available, false otherwise
* @throws IOException if a remote or network exception occurs
*/
boolean isTableAvailable(TableName tableName) throws IOException;
@Deprecated
boolean isTableAvailable(byte[] tableName) throws IOException;
/**
@ -164,7 +174,12 @@ public interface HConnection extends Abortable, Closeable {
* @throws IOException
* if a remote or network exception occurs
*/
boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException;
boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws
IOException;
@Deprecated
boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws
IOException;
/**
* List all the userspace tables. In other words, scan the META table.
@ -183,6 +198,10 @@ public interface HConnection extends Abortable, Closeable {
* @return table metadata
* @throws IOException if a remote or network exception occurs
*/
HTableDescriptor getHTableDescriptor(TableName tableName)
throws IOException;
@Deprecated
HTableDescriptor getHTableDescriptor(byte[] tableName)
throws IOException;
@ -195,10 +214,12 @@ public interface HConnection extends Abortable, Closeable {
* question
* @throws IOException if a remote or network exception occurs
*/
HRegionLocation locateRegion(
final byte[] tableName, final byte[] row
)
throws IOException;
public HRegionLocation locateRegion(final TableName tableName,
final byte [] row) throws IOException;
@Deprecated
public HRegionLocation locateRegion(final byte[] tableName,
final byte [] row) throws IOException;
/**
* Allows flushing the region cache.
@ -211,6 +232,9 @@ public interface HConnection extends Abortable, Closeable {
* @param tableName Name of the table whose regions we are to remove from
* cache.
*/
void clearRegionCache(final TableName tableName);
@Deprecated
void clearRegionCache(final byte[] tableName);
/**
@ -228,10 +252,12 @@ public interface HConnection extends Abortable, Closeable {
* question
* @throws IOException if a remote or network exception occurs
*/
HRegionLocation relocateRegion(
final byte[] tableName, final byte[] row
)
throws IOException;
HRegionLocation relocateRegion(final TableName tableName,
final byte [] row) throws IOException;
@Deprecated
HRegionLocation relocateRegion(final byte[] tableName,
final byte [] row) throws IOException;
/**
* Update the location cache. This is used internally by HBase, in most cases it should not be
@ -241,9 +267,12 @@ public interface HConnection extends Abortable, Closeable {
* @param exception the exception if any. Can be null.
* @param source the previous location
*/
void updateCachedLocations(
byte[] tableName, byte[] rowkey, Object exception, HRegionLocation source
);
void updateCachedLocations(TableName tableName, byte[] rowkey,
Object exception, HRegionLocation source);
@Deprecated
void updateCachedLocations(byte[] tableName, byte[] rowkey,
Object exception, HRegionLocation source);
/**
* Gets the location of the region of <i>regionName</i>.
@ -261,8 +290,10 @@ public interface HConnection extends Abortable, Closeable {
* @return list of region locations for all regions of table
* @throws IOException
*/
List<HRegionLocation> locateRegions(final byte[] tableName)
throws IOException;
List<HRegionLocation> locateRegions(final TableName tableName) throws IOException;
@Deprecated
List<HRegionLocation> locateRegions(final byte[] tableName) throws IOException;
/**
* Gets the locations of all regions in the specified table, <i>tableName</i>.
@ -273,9 +304,14 @@ public interface HConnection extends Abortable, Closeable {
* @return list of region locations for all regions of table
* @throws IOException
*/
List<HRegionLocation> locateRegions(
final byte[] tableName, final boolean useCache, final boolean offlined
) throws IOException;
public List<HRegionLocation> locateRegions(final TableName tableName,
final boolean useCache,
final boolean offlined) throws IOException;
@Deprecated
public List<HRegionLocation> locateRegions(final byte[] tableName,
final boolean useCache,
final boolean offlined) throws IOException;
/**
* Returns a {@link MasterAdminKeepAliveConnection} to the active master
@ -325,7 +361,12 @@ public interface HConnection extends Abortable, Closeable {
* @return Location of row.
* @throws IOException if a remote or network exception occurs
*/
HRegionLocation getRegionLocation(byte [] tableName, byte [] row,
HRegionLocation getRegionLocation(TableName tableName, byte [] row,
boolean reload)
throws IOException;
@Deprecated
HRegionLocation getRegionLocation(byte[] tableName, byte [] row,
boolean reload)
throws IOException;
@ -345,10 +386,12 @@ public interface HConnection extends Abortable, Closeable {
* @deprecated since 0.96 - Use {@link HTableInterface#batch} instead
*/
@Deprecated
void processBatch(
List<? extends Row> actions, final byte[] tableName, ExecutorService pool, Object[] results
)
throws IOException, InterruptedException;
void processBatch(List<? extends Row> actions, final TableName tableName,
ExecutorService pool, Object[] results) throws IOException, InterruptedException;
@Deprecated
void processBatch(List<? extends Row> actions, final byte[] tableName,
ExecutorService pool, Object[] results) throws IOException, InterruptedException;
/**
* Parameterized batch processing, allowing varying return types for different
@ -356,13 +399,18 @@ public interface HConnection extends Abortable, Closeable {
* @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead
*/
@Deprecated
<R> void processBatchCallback(
List<? extends Row> list,
byte[] tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback
) throws IOException, InterruptedException;
public <R> void processBatchCallback(List<? extends Row> list,
final TableName tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback) throws IOException, InterruptedException;
@Deprecated
public <R> void processBatchCallback(List<? extends Row> list,
final byte[] tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback) throws IOException, InterruptedException;
/**
* Enable or disable region cache prefetch for the table. It will be
@ -371,9 +419,11 @@ public interface HConnection extends Abortable, Closeable {
* @param tableName name of table to configure.
* @param enable Set to true to enable region cache prefetch.
*/
void setRegionCachePrefetch(
final byte[] tableName, final boolean enable
);
public void setRegionCachePrefetch(final TableName tableName,
final boolean enable);
public void setRegionCachePrefetch(final byte[] tableName,
final boolean enable);
/**
* Check whether region cache prefetch is enabled or not.
@ -381,6 +431,8 @@ public interface HConnection extends Abortable, Closeable {
* @return true if table's region cache prefetch is enabled. Otherwise
* it is disabled.
*/
boolean getRegionCachePrefetch(final TableName tableName);
boolean getRegionCachePrefetch(final byte[] tableName);
/**
@ -395,8 +447,11 @@ public interface HConnection extends Abortable, Closeable {
* @return HTD[] table metadata
* @throws IOException if a remote or network exception occurs
*/
HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
throws IOException;
HTableDescriptor[] getHTableDescriptorsByTableName(List<TableName> tableNames) throws IOException;
@Deprecated
HTableDescriptor[] getHTableDescriptors(List<String> tableNames) throws
IOException;
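Unlike CoprocessorEnvironment above, this interface keeps every byte[] form as a deprecated twin, so existing callers compile while new code adopts TableName. A hedged sketch, with conn assumed to be an existing HConnection:

TableName tn = TableName.valueOf("usertable");
boolean enabled = conn.isTableEnabled(tn);
HRegionLocation loc = conn.getRegionLocation(tn, Bytes.toBytes("row"), false);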
/**
* @return true if this connection is closed

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@ -72,6 +73,22 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
@ -442,7 +459,7 @@ public class HConnectionManager {
* @return Number of cached regions for the table.
* @throws ZooKeeperConnectionException
*/
static int getCachedRegionCount(Configuration conf, final byte[] tableName)
static int getCachedRegionCount(Configuration conf, final TableName tableName)
throws IOException {
return execute(new HConnectable<Integer>(conf) {
@Override
@ -458,7 +475,9 @@ public class HConnectionManager {
* @return true if the region where the table and row reside is cached.
* @throws ZooKeeperConnectionException
*/
static boolean isRegionCached(Configuration conf, final byte[] tableName, final byte[] row)
static boolean isRegionCached(Configuration conf,
final TableName tableName,
final byte[] row)
throws IOException {
return execute(new HConnectable<Boolean>(conf) {
@Override
@ -542,11 +561,11 @@ public class HConnectionManager {
private RpcClient rpcClient;
/**
* Map of table to table {@link HRegionLocation}s. The table key is made
* by doing a {@link Bytes#mapKey(byte[])} of the table's name.
*/
private final Map<Integer, SoftValueSortedMap<byte [], HRegionLocation>> cachedRegionLocations =
new HashMap<Integer, SoftValueSortedMap<byte [], HRegionLocation>>();
* Map of table to table {@link HRegionLocation}s.
*/
private final Map<TableName, SoftValueSortedMap<byte[], HRegionLocation>>
cachedRegionLocations =
new HashMap<TableName, SoftValueSortedMap<byte[], HRegionLocation>>();
// The presence of a server in the map implies it's likely that there is an
// entry in cachedRegionLocations that map to this server; but the absence
@ -792,24 +811,41 @@ public class HConnectionManager {
}
@Override
public HRegionLocation getRegionLocation(final byte [] name,
public HRegionLocation getRegionLocation(final TableName tableName,
final byte [] row, boolean reload)
throws IOException {
return reload? relocateRegion(name, row): locateRegion(name, row);
return reload? relocateRegion(tableName, row): locateRegion(tableName, row);
}
@Override
public boolean isTableEnabled(byte[] tableName) throws IOException {
public HRegionLocation getRegionLocation(final byte[] tableName,
final byte [] row, boolean reload)
throws IOException {
return getRegionLocation(TableName.valueOf(tableName), row, reload);
}
@Override
public boolean isTableEnabled(TableName tableName) throws IOException {
return this.registry.isTableOnlineState(tableName, true);
}
@Override
public boolean isTableDisabled(byte[] tableName) throws IOException {
public boolean isTableEnabled(byte[] tableName) throws IOException {
return isTableEnabled(TableName.valueOf(tableName));
}
@Override
public boolean isTableDisabled(TableName tableName) throws IOException {
return this.registry.isTableOnlineState(tableName, false);
}
@Override
public boolean isTableAvailable(final byte[] tableName) throws IOException {
public boolean isTableDisabled(byte[] tableName) throws IOException {
return isTableDisabled(TableName.valueOf(tableName));
}
@Override
public boolean isTableAvailable(final TableName tableName) throws IOException {
final AtomicBoolean available = new AtomicBoolean(true);
final AtomicInteger regionCount = new AtomicInteger(0);
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
@ -817,14 +853,15 @@ public class HConnectionManager {
public boolean processRow(Result row) throws IOException {
HRegionInfo info = MetaScanner.getHRegionInfo(row);
if (info != null) {
if (Bytes.compareTo(tableName, info.getTableName()) == 0) {
if (tableName.equals(info.getTableName())) {
ServerName server = HRegionInfo.getServerName(row);
if (server == null) {
available.set(false);
return false;
}
regionCount.incrementAndGet();
} else if (Bytes.compareTo(tableName, info.getTableName()) < 0) {
} else if (tableName.compareTo(
info.getTableName()) < 0) {
// Return if we are done with the current table
return false;
}
@ -837,7 +874,12 @@ public class HConnectionManager {
}
@Override
public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys)
public boolean isTableAvailable(final byte[] tableName) throws IOException {
return isTableAvailable(TableName.valueOf(tableName));
}
@Override
public boolean isTableAvailable(final TableName tableName, final byte[][] splitKeys)
throws IOException {
final AtomicBoolean available = new AtomicBoolean(true);
final AtomicInteger regionCount = new AtomicInteger(0);
@ -846,7 +888,7 @@ public class HConnectionManager {
public boolean processRow(Result row) throws IOException {
HRegionInfo info = MetaScanner.getHRegionInfo(row);
if (info != null) {
if (Bytes.compareTo(tableName, info.getTableName()) == 0) {
if (tableName.equals(info.getTableName())) {
ServerName server = HRegionInfo.getServerName(row);
if (server == null) {
available.set(false);
@ -864,7 +906,7 @@ public class HConnectionManager {
// Always empty start row should be counted
regionCount.incrementAndGet();
}
} else if (Bytes.compareTo(tableName, info.getTableName()) < 0) {
} else if (tableName.compareTo(info.getTableName()) < 0) {
// Return if we are done with the current table
return false;
}
@ -877,6 +919,12 @@ public class HConnectionManager {
return available.get() && (regionCount.get() == splitKeys.length + 1);
}
@Override
public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys)
throws IOException {
return isTableAvailable(TableName.valueOf(tableName), splitKeys);
}
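// A caller-side sketch of the overload pattern above (not part of this
// diff; assumes a reachable cluster and a hypothetical table "t1"): the
// byte[] overloads survive for source compatibility and simply wrap
// TableName.valueOf() before invoking the TableName variants.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.util.Bytes;
class TableNameOverloadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HConnection conn = HConnectionManager.createConnection(conf);
    try {
      boolean viaBytes = conn.isTableEnabled(Bytes.toBytes("t1"));
      boolean viaName = conn.isTableEnabled(TableName.valueOf("t1"));
      System.out.println(viaBytes + " == " + viaName); // same code path
    } finally {
      conn.close();
    }
  }
}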
@Override
public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
return locateRegion(HRegionInfo.getTableName(regionName),
@ -893,14 +941,20 @@ public class HConnectionManager {
}
@Override
public List<HRegionLocation> locateRegions(final byte[] tableName)
public List<HRegionLocation> locateRegions(final TableName tableName)
throws IOException {
return locateRegions (tableName, false, true);
}
@Override
public List<HRegionLocation> locateRegions(final byte[] tableName, final boolean useCache,
final boolean offlined) throws IOException {
public List<HRegionLocation> locateRegions(final byte[] tableName)
throws IOException {
return locateRegions(TableName.valueOf(tableName));
}
@Override
public List<HRegionLocation> locateRegions(final TableName tableName,
final boolean useCache, final boolean offlined) throws IOException {
NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
tableName, offlined);
final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
@ -911,41 +965,59 @@ public class HConnectionManager {
}
@Override
public HRegionLocation locateRegion(final byte [] tableName,
public List<HRegionLocation> locateRegions(final byte[] tableName,
final boolean useCache, final boolean offlined) throws IOException {
return locateRegions(TableName.valueOf(tableName), useCache, offlined);
}
@Override
public HRegionLocation locateRegion(final TableName tableName,
final byte [] row)
throws IOException{
return locateRegion(tableName, row, true, true);
}
@Override
public HRegionLocation relocateRegion(final byte [] tableName,
public HRegionLocation locateRegion(final byte[] tableName,
final byte [] row)
throws IOException{
return locateRegion(TableName.valueOf(tableName), row);
}
@Override
public HRegionLocation relocateRegion(final TableName tableName,
final byte [] row) throws IOException{
// Since this is an explicit request not to use any caching, finding
// disabled tables should not be desirable. This will ensure that an exception is thrown
// the first time a disabled table is interacted with.
if (isTableDisabled(tableName)) {
throw new DoNotRetryIOException(Bytes.toString(tableName) + " is disabled.");
throw new DoNotRetryIOException(tableName.getNameAsString() + " is disabled.");
}
return locateRegion(tableName, row, false, true);
}
private HRegionLocation locateRegion(final byte [] tableName,
@Override
public HRegionLocation relocateRegion(final byte[] tableName,
final byte [] row) throws IOException {
return relocateRegion(TableName.valueOf(tableName), row);
}
private HRegionLocation locateRegion(final TableName tableName,
final byte [] row, boolean useCache, boolean retry)
throws IOException {
if (this.closed) throw new IOException(toString() + " closed");
if (tableName == null || tableName.length == 0) {
if (tableName == null || tableName.getName().length == 0) {
throw new IllegalArgumentException(
"table name cannot be null or zero length");
}
if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
if (tableName.equals(TableName.META_TABLE_NAME)) {
return this.registry.getMetaRegionLocation();
} else {
// Region not in the cache - have to go to the meta RS
return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row,
return locateRegionInMeta(TableName.META_TABLE_NAME, tableName, row,
useCache, userRegionLock, retry);
}
}
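// Sketch of the two branches above (hypothetical table "t1"; conn is an
// open HConnection): a .META. lookup is answered directly by the
// registry, while a user table goes through locateRegionInMeta --
// cache first, then a meta scan.
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.util.Bytes;
class LocateRegionSketch {
  static void locate(HConnection conn) throws IOException {
    HRegionLocation metaLoc =
        conn.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW);
    HRegionLocation userLoc =
        conn.locateRegion(TableName.valueOf("t1"), Bytes.toBytes("row-42"));
    System.out.println(metaLoc + " / " + userLoc);
  }
}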
@ -955,7 +1027,7 @@ public class HConnectionManager {
* row we're seeking. It will prefetch certain number of regions info and
* save them to the global region cache.
*/
private void prefetchRegionCache(final byte[] tableName,
private void prefetchRegionCache(final TableName tableName,
final byte[] row) {
// Implement a new visitor for MetaScanner, and use it to walk through
// the .META.
@ -968,7 +1040,7 @@ public class HConnectionManager {
}
// possible we got a region of a different table...
if (!Bytes.equals(regionInfo.getTableName(), tableName)) {
if (!regionInfo.getTableName().equals(tableName)) {
return false; // stop scanning
}
if (regionInfo.isOffline()) {
@ -994,7 +1066,7 @@ public class HConnectionManager {
try {
// pre-fetch certain number of regions info at region cache.
MetaScanner.metaScan(conf, this, visitor, tableName, row,
this.prefetchRegionLimit, HConstants.META_TABLE_NAME);
this.prefetchRegionLimit, TableName.META_TABLE_NAME);
} catch (IOException e) {
LOG.warn("Encountered problems when prefetch META table: ", e);
}
@ -1004,8 +1076,8 @@ public class HConnectionManager {
* Search the .META. table for the HRegionLocation
* info that contains the table and row we're seeking.
*/
private HRegionLocation locateRegionInMeta(final byte [] parentTable,
final byte [] tableName, final byte [] row, boolean useCache,
private HRegionLocation locateRegionInMeta(final TableName parentTable,
final TableName tableName, final byte [] row, boolean useCache,
Object regionLockObject, boolean retry)
throws IOException {
HRegionLocation location;
@ -1051,7 +1123,7 @@ public class HConnectionManager {
}
// If the parent table is META, we may want to pre-fetch some
// region info into the global region cache for this table.
if (Bytes.equals(parentTable, HConstants.META_TABLE_NAME)
if (parentTable.equals(TableName.META_TABLE_NAME)
&& (getRegionCachePrefetch(tableName))) {
prefetchRegionCache(tableName, row);
}
@ -1070,21 +1142,21 @@ public class HConnectionManager {
HConstants.CATALOG_FAMILY);
}
if (regionInfoRow == null) {
throw new TableNotFoundException(Bytes.toString(tableName));
throw new TableNotFoundException(tableName);
}
// convert the row result into the HRegionLocation we need!
HRegionInfo regionInfo = MetaScanner.getHRegionInfo(regionInfoRow);
if (regionInfo == null) {
throw new IOException("HRegionInfo was null or empty in " +
Bytes.toString(parentTable) + ", row=" + regionInfoRow);
parentTable + ", row=" + regionInfoRow);
}
// possible we got a region of a different table...
if (!Bytes.equals(regionInfo.getTableName(), tableName)) {
if (!regionInfo.getTableName().equals(tableName)) {
throw new TableNotFoundException(
"Table '" + Bytes.toString(tableName) + "' was not found, got: " +
Bytes.toString(regionInfo.getTableName()) + ".");
"Table '" + tableName + "' was not found, got: " +
regionInfo.getTableName() + ".");
}
if (regionInfo.isSplit()) {
throw new RegionOfflineException("the only available region for" +
@ -1101,7 +1173,7 @@ public class HConnectionManager {
ServerName serverName = HRegionInfo.getServerName(regionInfoRow);
if (serverName == null) {
throw new NoServerForRegionException("No server address listed " +
"in " + Bytes.toString(parentTable) + " for region " +
"in " + parentTable + " for region " +
regionInfo.getRegionNameAsString() + " containing row " +
Bytes.toStringBinary(row));
}
@ -1129,7 +1201,7 @@ public class HConnectionManager {
if (tries < numTries - 1) {
if (LOG.isDebugEnabled()) {
LOG.debug("locateRegionInMeta parentTable=" +
Bytes.toString(parentTable) + ", metaLocation=" +
parentTable + ", metaLocation=" +
((metaLocation == null)? "null": "{" + metaLocation + "}") +
", attempt=" + tries + " of " +
this.numTries + " failed; retrying after sleep of " +
@ -1165,9 +1237,9 @@ public class HConnectionManager {
* @param row
* @return Null or region location found in cache.
*/
HRegionLocation getCachedLocation(final byte [] tableName,
HRegionLocation getCachedLocation(final TableName tableName,
final byte [] row) {
SoftValueSortedMap<byte [], HRegionLocation> tableLocations =
SoftValueSortedMap<byte[], HRegionLocation> tableLocations =
getTableLocations(tableName);
// start to examine the cache. we can only do cache actions
@ -1207,7 +1279,7 @@ public class HConnectionManager {
* @param tableName tableName
* @param row
*/
void forceDeleteCachedLocation(final byte [] tableName, final byte [] row) {
void forceDeleteCachedLocation(final TableName tableName, final byte [] row) {
HRegionLocation rl = null;
synchronized (this.cachedRegionLocations) {
Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
@ -1223,7 +1295,7 @@ public class HConnectionManager {
if ((rl != null) && LOG.isDebugEnabled()) {
LOG.debug("Removed " + rl.getHostname() + ":" + rl.getPort()
+ " as a location of " + rl.getRegionInfo().getRegionNameAsString() +
" for tableName=" + Bytes.toString(tableName) + " from cache");
" for tableName=" + tableName + " from cache");
}
}
@ -1259,18 +1331,16 @@ public class HConnectionManager {
* @param tableName
* @return Map of cached locations for passed <code>tableName</code>
*/
private SoftValueSortedMap<byte [], HRegionLocation> getTableLocations(
final byte [] tableName) {
private SoftValueSortedMap<byte[], HRegionLocation> getTableLocations(
final TableName tableName) {
// find the map of cached locations for this table
Integer key = Bytes.mapKey(tableName);
SoftValueSortedMap<byte [], HRegionLocation> result;
SoftValueSortedMap<byte[], HRegionLocation> result;
synchronized (this.cachedRegionLocations) {
result = this.cachedRegionLocations.get(key);
result = this.cachedRegionLocations.get(tableName);
// if tableLocations for this table isn't built yet, make one
if (result == null) {
result = new SoftValueSortedMap<byte [], HRegionLocation>(
Bytes.BYTES_COMPARATOR);
this.cachedRegionLocations.put(key, result);
result = new SoftValueSortedMap<byte[], HRegionLocation>(Bytes.BYTES_COMPARATOR);
this.cachedRegionLocations.put(tableName, result);
}
}
return result;
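// Why the cache can now be keyed by TableName directly instead of
// Bytes.mapKey(): TableName defines equals()/hashCode() over the fully
// qualified name, so distinct instances naming the same table hit the
// same bucket. A self-contained sketch (namespace/table hypothetical):
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
class TableNameKeySketch {
  public static void main(String[] args) {
    Map<TableName, String> cache = new HashMap<TableName, String>();
    cache.put(TableName.valueOf("ns1", "t1"), "cached");
    // valueOf(String) parses the namespace-qualified "ns1:t1" form.
    System.out.println(cache.get(TableName.valueOf("ns1:t1"))); // cached
  }
}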
@ -1285,23 +1355,28 @@ public class HConnectionManager {
}
@Override
public void clearRegionCache(final byte [] tableName) {
public void clearRegionCache(final TableName tableName) {
synchronized (this.cachedRegionLocations) {
this.cachedRegionLocations.remove(Bytes.mapKey(tableName));
this.cachedRegionLocations.remove(tableName);
}
}
@Override
public void clearRegionCache(final byte[] tableName) {
clearRegionCache(TableName.valueOf(tableName));
}
/**
* Put a newly discovered HRegionLocation into the cache.
* @param tableName The table name.
* @param source the source of the new location, if it's not coming from meta
* @param location the new location
*/
private void cacheLocation(final byte [] tableName, final HRegionLocation source,
private void cacheLocation(final TableName tableName, final HRegionLocation source,
final HRegionLocation location) {
boolean isFromMeta = (source == null);
byte [] startKey = location.getRegionInfo().getStartKey();
Map<byte [], HRegionLocation> tableLocations =
Map<byte[], HRegionLocation> tableLocations =
getTableLocations(tableName);
boolean isNewCacheEntry = false;
boolean isStaleUpdate = false;
@ -2003,6 +2078,36 @@ public class HConnectionManager {
return stub.isMasterRunning(controller, request);
}
@Override
public ModifyNamespaceResponse modifyNamespace(RpcController controller, ModifyNamespaceRequest request) throws ServiceException {
return stub.modifyNamespace(controller, request);
}
@Override
public CreateNamespaceResponse createNamespace(RpcController controller, CreateNamespaceRequest request) throws ServiceException {
return stub.createNamespace(controller, request);
}
@Override
public DeleteNamespaceResponse deleteNamespace(RpcController controller, DeleteNamespaceRequest request) throws ServiceException {
return stub.deleteNamespace(controller, request);
}
@Override
public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, GetNamespaceDescriptorRequest request) throws ServiceException {
return stub.getNamespaceDescriptor(controller, request);
}
@Override
public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller, ListNamespaceDescriptorsRequest request) throws ServiceException {
return stub.listNamespaceDescriptors(controller, request);
}
@Override
public GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace(RpcController controller, GetTableDescriptorsByNamespaceRequest request) throws ServiceException {
return stub.getTableDescriptorsByNamespace(controller, request);
}
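// The six stub delegations above are what the new namespace admin calls
// ride on. A hedged end-to-end sketch via the HBaseAdmin wrappers this
// commit introduces (namespace name hypothetical, cluster assumed up):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
class NamespaceAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      admin.createNamespace(NamespaceDescriptor.create("reports").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
      admin.deleteNamespace("reports");
    } finally {
      admin.close();
    }
  }
}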
@Override
public void close() {
release(this.mss);
@ -2147,8 +2252,9 @@ public class HConnectionManager {
return;
}
synchronized (this.cachedRegionLocations) {
byte[] tableName = location.getRegionInfo().getTableName();
Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
TableName tableName = location.getRegionInfo().getTableName();
Map<byte[], HRegionLocation> tableLocations =
getTableLocations(tableName);
if (!tableLocations.isEmpty()) {
// Delete if there's something in the cache for this region.
HRegionLocation removedLocation =
@ -2156,7 +2262,7 @@ public class HConnectionManager {
if (LOG.isDebugEnabled() && removedLocation != null) {
LOG.debug("Removed " +
location.getRegionInfo().getRegionNameAsString() +
" for tableName=" + Bytes.toString(tableName) +
" for tableName=" + tableName +
" from cache");
}
}
@ -2171,11 +2277,11 @@ public class HConnectionManager {
* @param source server that is the source of the location update.
*/
@Override
public void updateCachedLocations(final byte[] tableName, byte[] rowkey,
public void updateCachedLocations(final TableName tableName, byte[] rowkey,
final Object exception, final HRegionLocation source) {
if (rowkey == null || tableName == null) {
LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
", tableName=" + (tableName == null ? "null" : Bytes.toString(tableName)));
", tableName=" + (tableName == null ? "null" : tableName));
return;
}
@ -2205,10 +2311,16 @@ public class HConnectionManager {
}
}
@Override
public void updateCachedLocations(final byte[] tableName, byte[] rowkey,
final Object exception, final HRegionLocation source) {
updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source);
}
@Override
@Deprecated
public void processBatch(List<? extends Row> list,
final byte[] tableName,
final TableName tableName,
ExecutorService pool,
Object[] results) throws IOException, InterruptedException {
// This belongs in HTable!!! Not in here. St.Ack
@ -2221,6 +2333,15 @@ public class HConnectionManager {
processBatchCallback(list, tableName, pool, results, null);
}
@Override
@Deprecated
public void processBatch(List<? extends Row> list,
final byte[] tableName,
ExecutorService pool,
Object[] results) throws IOException, InterruptedException {
processBatch(list, TableName.valueOf(tableName), pool, results);
}
/**
* Send the queries in parallel on the different region servers. Retries on failures.
* If the method returns it means that there is no error, and the 'results' array will
@ -2232,7 +2353,7 @@ public class HConnectionManager {
@Deprecated
public <R> void processBatchCallback(
List<? extends Row> list,
byte[] tableName,
TableName tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback)
@ -2252,8 +2373,20 @@ public class HConnectionManager {
}
}
@Override
@Deprecated
public <R> void processBatchCallback(
List<? extends Row> list,
byte[] tableName,
ExecutorService pool,
Object[] results,
Batch.Callback<R> callback)
throws IOException, InterruptedException {
processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback);
}
// For tests.
protected <R> AsyncProcess createAsyncProcess(byte[] tableName, ExecutorService pool,
protected <R> AsyncProcess createAsyncProcess(TableName tableName, ExecutorService pool,
AsyncProcess.AsyncProcessCallback<R> callback, Configuration conf) {
return new AsyncProcess<R>(this, tableName, pool, callback, conf,
RpcRetryingCallerFactory.instantiate(conf));
@ -2302,10 +2435,9 @@ public class HConnectionManager {
* Return the number of cached region for a table. It will only be called
* from a unit test.
*/
int getNumberOfCachedRegionLocations(final byte[] tableName) {
Integer key = Bytes.mapKey(tableName);
int getNumberOfCachedRegionLocations(final TableName tableName) {
synchronized (this.cachedRegionLocations) {
Map<byte[], HRegionLocation> tableLocs = this.cachedRegionLocations.get(key);
Map<byte[], HRegionLocation> tableLocs = this.cachedRegionLocations.get(tableName);
if (tableLocs == null) {
return 0;
}
@ -2320,25 +2452,36 @@ public class HConnectionManager {
* @param row row
* @return Region cached or not.
*/
boolean isRegionCached(final byte[] tableName, final byte[] row) {
boolean isRegionCached(TableName tableName, final byte[] row) {
HRegionLocation location = getCachedLocation(tableName, row);
return location != null;
}
@Override
public void setRegionCachePrefetch(final byte[] tableName,
public void setRegionCachePrefetch(final TableName tableName,
final boolean enable) {
if (!enable) {
regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName));
regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName.getName()));
}
else {
regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName));
regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName.getName()));
}
}
@Override
public boolean getRegionCachePrefetch(final byte[] tableName) {
return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName));
public void setRegionCachePrefetch(final byte[] tableName,
final boolean enable) {
setRegionCachePrefetch(TableName.valueOf(tableName), enable);
}
@Override
public boolean getRegionCachePrefetch(TableName tableName) {
return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName.getName()));
}
@Override
public boolean getRegionCachePrefetch(byte[] tableName) {
return getRegionCachePrefetch(TableName.valueOf(tableName));
}
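// Sketch of the per-table prefetch switch above (conn is an open
// HConnection, table name hypothetical): disabling adds the table to the
// disabled-tables set, which locateRegionInMeta consults before
// prefetching.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
class PrefetchToggleSketch {
  static void toggle(HConnection conn) {
    TableName tn = TableName.valueOf("metrics");
    conn.setRegionCachePrefetch(tn, false); // joins the disabled set
    assert !conn.getRegionCachePrefetch(tn);
    conn.setRegionCachePrefetch(tn, true);  // removed again
  }
}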
@Override
@ -2457,7 +2600,7 @@ public class HConnectionManager {
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest((List<String>)null);
RequestConverter.buildGetTableDescriptorsRequest((List<TableName>)null);
return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
@ -2467,7 +2610,8 @@ public class HConnectionManager {
}
@Override
public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) throws IOException {
public HTableDescriptor[] getHTableDescriptorsByTableName(
List<TableName> tableNames) throws IOException {
if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0];
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
try {
@ -2481,6 +2625,17 @@ public class HConnectionManager {
}
}
@Override
public HTableDescriptor[] getHTableDescriptors(
List<String> names) throws IOException {
List<TableName> tableNames = new ArrayList<TableName>(names.size());
for(String name : names) {
tableNames.add(TableName.valueOf(name));
}
return getHTableDescriptorsByTableName(tableNames);
}
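// Sketch of the String-based lookup above (table names hypothetical):
// each entry is parsed with TableName.valueOf, which accepts both plain
// qualifiers and namespace-qualified "ns:qualifier" strings.
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HConnection;
class DescriptorLookupSketch {
  static void dump(HConnection conn) throws IOException {
    HTableDescriptor[] htds =
        conn.getHTableDescriptors(Arrays.asList("t1", "reports:daily"));
    for (HTableDescriptor htd : htds) {
      System.out.println(htd.getNameAsString());
    }
  }
}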
/**
* Connects to the master to get the table descriptor.
* @param tableName table name
@ -2489,10 +2644,10 @@ public class HConnectionManager {
* is not found.
*/
@Override
public HTableDescriptor getHTableDescriptor(final byte[] tableName)
public HTableDescriptor getHTableDescriptor(final TableName tableName)
throws IOException {
if (tableName == null || tableName.length == 0) return null;
if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
if (tableName == null) return null;
if (tableName.equals(TableName.META_TABLE_NAME)) {
return HTableDescriptor.META_TABLEDESC;
}
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
@ -2509,7 +2664,13 @@ public class HConnectionManager {
if (!htds.getTableSchemaList().isEmpty()) {
return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
}
throw new TableNotFoundException(Bytes.toString(tableName));
throw new TableNotFoundException(tableName.getNameAsString());
}
@Override
public HTableDescriptor getHTableDescriptor(final byte[] tableName)
throws IOException {
return getHTableDescriptor(TableName.valueOf(tableName));
}
}

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
@ -28,7 +29,9 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@ -106,87 +109,145 @@ public class HConnectionWrapper implements HConnection {
}
@Override
public boolean isTableEnabled(byte[] tableName) throws IOException {
public boolean isTableEnabled(TableName tableName) throws IOException {
return hconnection.isTableEnabled(tableName);
}
@Override
public boolean isTableDisabled(byte[] tableName) throws IOException {
public boolean isTableEnabled(byte[] tableName) throws IOException {
return isTableEnabled(TableName.valueOf(tableName));
}
@Override
public boolean isTableDisabled(TableName tableName) throws IOException {
return hconnection.isTableDisabled(tableName);
}
@Override
public boolean isTableAvailable(byte[] tableName) throws IOException {
public boolean isTableDisabled(byte[] tableName) throws IOException {
return isTableDisabled(TableName.valueOf(tableName));
}
@Override
public boolean isTableAvailable(TableName tableName) throws IOException {
return hconnection.isTableAvailable(tableName);
}
@Override
public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys)
throws IOException {
public boolean isTableAvailable(byte[] tableName) throws IOException {
return isTableAvailable(TableName.valueOf(tableName));
}
@Override
public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
return hconnection.isTableAvailable(tableName, splitKeys);
}
@Override
public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException {
return isTableAvailable(TableName.valueOf(tableName), splitKeys);
}
@Override
public HTableDescriptor[] listTables() throws IOException {
return hconnection.listTables();
}
@Override
public HTableDescriptor getHTableDescriptor(byte[] tableName)
throws IOException {
public HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException {
return hconnection.getHTableDescriptor(tableName);
}
@Override
public HRegionLocation locateRegion(byte[] tableName, byte[] row)
throws IOException {
public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException {
return getHTableDescriptor(TableName.valueOf(tableName));
}
@Override
public HRegionLocation locateRegion(TableName tableName, byte[] row) throws IOException {
return hconnection.locateRegion(tableName, row);
}
@Override
public HRegionLocation locateRegion(byte[] tableName, byte[] row) throws IOException {
return locateRegion(TableName.valueOf(tableName), row);
}
@Override
public void clearRegionCache() {
hconnection.clearRegionCache();
}
@Override
public void clearRegionCache(byte[] tableName) {
public void clearRegionCache(TableName tableName) {
hconnection.clearRegionCache(tableName);
}
@Override
public void clearRegionCache(byte[] tableName) {
clearRegionCache(TableName.valueOf(tableName));
}
@Override
public void deleteCachedRegionLocation(HRegionLocation location) {
hconnection.deleteCachedRegionLocation(location);
}
@Override
public HRegionLocation relocateRegion(byte[] tableName, byte[] row)
throws IOException {
public HRegionLocation relocateRegion(TableName tableName, byte[] row) throws IOException {
return hconnection.relocateRegion(tableName, row);
}
@Override
public void updateCachedLocations(byte[] tableName, byte[] rowkey,
Object exception, HRegionLocation source) {
public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException {
return relocateRegion(TableName.valueOf(tableName), row);
}
@Override
public void updateCachedLocations(TableName tableName,
byte[] rowkey,
Object exception,
HRegionLocation source) {
hconnection.updateCachedLocations(tableName, rowkey, exception, source);
}
@Override
public void updateCachedLocations(byte[] tableName,
byte[] rowkey,
Object exception,
HRegionLocation source) {
updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source);
}
@Override
public HRegionLocation locateRegion(byte[] regionName) throws IOException {
return hconnection.locateRegion(regionName);
}
@Override
public List<HRegionLocation> locateRegions(byte[] tableName)
throws IOException {
public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
return hconnection.locateRegions(tableName);
}
@Override
public List<HRegionLocation> locateRegions(byte[] tableName,
boolean useCache, boolean offlined) throws IOException {
public List<HRegionLocation> locateRegions(byte[] tableName) throws IOException {
return locateRegions(TableName.valueOf(tableName));
}
@Override
public List<HRegionLocation> locateRegions(TableName tableName,
boolean useCache,
boolean offlined) throws IOException {
return hconnection.locateRegions(tableName, useCache, offlined);
}
@Override
public List<HRegionLocation> locateRegions(byte[] tableName,
boolean useCache,
boolean offlined) throws IOException {
return locateRegions(TableName.valueOf(tableName));
}
@Override
public MasterAdminService.BlockingInterface getMasterAdmin() throws IOException {
return hconnection.getMasterAdmin();
@ -237,44 +298,86 @@ public class HConnectionWrapper implements HConnection {
}
@Override
public HRegionLocation getRegionLocation(byte[] tableName, byte[] row,
boolean reload) throws IOException {
public HRegionLocation getRegionLocation(TableName tableName,
byte[] row, boolean reload) throws IOException {
return hconnection.getRegionLocation(tableName, row, reload);
}
@Override
public void processBatch(List<? extends Row> actions, byte[] tableName,
ExecutorService pool, Object[] results) throws IOException,
InterruptedException {
public HRegionLocation getRegionLocation(byte[] tableName,
byte[] row, boolean reload) throws IOException {
return getRegionLocation(TableName.valueOf(tableName), row, reload);
}
@Override
public void processBatch(List<? extends Row> actions, TableName tableName, ExecutorService pool,
Object[] results) throws IOException, InterruptedException {
hconnection.processBatch(actions, tableName, pool, results);
}
@Override
public <R> void processBatchCallback(List<? extends Row> list,
byte[] tableName, ExecutorService pool, Object[] results,
Callback<R> callback) throws IOException, InterruptedException {
public void processBatch(List<? extends Row> actions, byte[] tableName, ExecutorService pool,
Object[] results) throws IOException, InterruptedException {
processBatch(actions, TableName.valueOf(tableName), pool, results);
}
@Override
public <R> void processBatchCallback(List<? extends Row> list, TableName tableName,
ExecutorService pool,
Object[] results,
Callback<R> callback)
throws IOException, InterruptedException {
hconnection.processBatchCallback(list, tableName, pool, results, callback);
}
@Override
public void setRegionCachePrefetch(byte[] tableName, boolean enable) {
public <R> void processBatchCallback(List<? extends Row> list, byte[] tableName,
ExecutorService pool,
Object[] results,
Callback<R> callback)
throws IOException, InterruptedException {
processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback);
}
@Override
public void setRegionCachePrefetch(TableName tableName, boolean enable) {
hconnection.setRegionCachePrefetch(tableName, enable);
}
@Override
public boolean getRegionCachePrefetch(byte[] tableName) {
public void setRegionCachePrefetch(byte[] tableName, boolean enable) {
setRegionCachePrefetch(TableName.valueOf(tableName), enable);
}
@Override
public boolean getRegionCachePrefetch(TableName tableName) {
return hconnection.getRegionCachePrefetch(tableName);
}
@Override
public boolean getRegionCachePrefetch(byte[] tableName) {
return getRegionCachePrefetch(TableName.valueOf(tableName));
}
@Override
public int getCurrentNrHRS() throws IOException {
return hconnection.getCurrentNrHRS();
}
@Override
public HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
throws IOException {
return hconnection.getHTableDescriptors(tableNames);
public HTableDescriptor[] getHTableDescriptorsByTableName(
List<TableName> tableNames) throws IOException {
return hconnection.getHTableDescriptorsByTableName(tableNames);
}
@Override
public HTableDescriptor[] getHTableDescriptors(
List<String> names) throws IOException {
List<TableName> tableNames = new ArrayList<TableName>(names.size());
for(String name : names) {
tableNames.add(TableName.valueOf(name));
}
return getHTableDescriptorsByTableName(tableNames);
}
@Override

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.Threads;
import java.io.Closeable;
@ -119,7 +119,7 @@ import java.util.concurrent.TimeUnit;
public class HTable implements HTableInterface {
private static final Log LOG = LogFactory.getLog(HTable.class);
protected HConnection connection;
private final byte [] tableName;
private final TableName tableName;
private volatile Configuration configuration;
protected List<Row> writeAsyncBuffer = new LinkedList<Row>();
private long writeBufferSize;
@ -150,10 +150,9 @@ public class HTable implements HTableInterface {
*/
public HTable(Configuration conf, final String tableName)
throws IOException {
this(conf, Bytes.toBytes(tableName));
this(conf, TableName.valueOf(tableName));
}
/**
* Creates an object to access a HBase table.
* Shares zookeeper connection and other resources with other HTable instances
@ -164,7 +163,24 @@ public class HTable implements HTableInterface {
* @param tableName Name of the table.
* @throws IOException if a remote or network exception occurs
*/
public HTable(Configuration conf, final byte [] tableName)
public HTable(Configuration conf, final byte[] tableName)
throws IOException {
this(conf, TableName.valueOf(tableName));
}
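// Sketch of the constructor funnel above (table "t1" hypothetical): every
// String/byte[] constructor now converts and delegates to the TableName
// form, so both handles below address the same table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
class HTableCtorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable viaBytes = new HTable(conf, Bytes.toBytes("t1"));     // delegates
    HTable viaName = new HTable(conf, TableName.valueOf("t1"));  // direct
    viaBytes.close();
    viaName.close();
  }
}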
/**
* Creates an object to access a HBase table.
* Shares zookeeper connection and other resources with other HTable instances
* created with the same <code>conf</code> instance. Uses already-populated
* region cache if one is available, populated by any other HTable instances
* sharing this <code>conf</code> instance. Recommended.
* @param conf Configuration object to use.
* @param tableName table name pojo
* @throws IOException if a remote or network exception occurs
*/
public HTable(Configuration conf, final TableName tableName)
throws IOException {
this.tableName = tableName;
this.cleanupPoolOnClose = this.cleanupConnectionOnClose = true;
@ -206,6 +222,23 @@ public class HTable implements HTableInterface {
*/
public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool)
throws IOException {
this(conf, TableName.valueOf(tableName), pool);
}
/**
* Creates an object to access a HBase table.
* Shares zookeeper connection and other resources with other HTable instances
* created with the same <code>conf</code> instance. Uses already-populated
* region cache if one is available, populated by any other HTable instances
* sharing this <code>conf</code> instance.
* Use this constructor when the ExecutorService is externally managed.
* @param conf Configuration object to use.
* @param tableName Name of the table.
* @param pool ExecutorService to be used.
* @throws IOException if a remote or network exception occurs
*/
public HTable(Configuration conf, final TableName tableName, final ExecutorService pool)
throws IOException {
this.connection = HConnectionManager.getConnection(conf);
this.configuration = conf;
this.pool = pool;
@ -229,6 +262,22 @@ public class HTable implements HTableInterface {
*/
public HTable(final byte[] tableName, final HConnection connection,
final ExecutorService pool) throws IOException {
this(TableName.valueOf(tableName), connection, pool);
}
/**
* Creates an object to access a HBase table.
* Shares zookeeper connection and other resources with other HTable instances
* created with the same <code>connection</code> instance.
* Use this constructor when the ExecutorService and HConnection instance are
* externally managed.
* @param tableName Name of the table.
* @param connection HConnection to be used.
* @param pool ExecutorService to be used.
* @throws IOException if a remote or network exception occurs
*/
public HTable(TableName tableName, final HConnection connection,
final ExecutorService pool) throws IOException {
if (connection == null || connection.isClosed()) {
throw new IllegalArgumentException("Connection is null or closed.");
}
@ -245,7 +294,7 @@ public class HTable implements HTableInterface {
* For internal testing.
*/
protected HTable(){
tableName = new byte[]{};
tableName = null;
cleanupPoolOnClose = false;
cleanupConnectionOnClose = false;
}
@ -255,7 +304,7 @@ public class HTable implements HTableInterface {
*/
private void finishSetup() throws IOException {
this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ?
this.operationTimeout = HTableDescriptor.isSystemTable(tableName) ?
this.configuration.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT):
this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
@ -299,7 +348,21 @@ public class HTable implements HTableInterface {
*/
@Deprecated
public static boolean isTableEnabled(String tableName) throws IOException {
return isTableEnabled(Bytes.toBytes(tableName));
return isTableEnabled(TableName.valueOf(tableName));
}
/**
* Tells whether or not a table is enabled. This method creates a
* new HBase configuration, so it might make your unit tests fail due to
* incorrect ZK client port.
* @param tableName Name of table to check.
* @return {@code true} if table is online.
* @throws IOException if a remote or network exception occurs
* @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
*/
@Deprecated
public static boolean isTableEnabled(byte[] tableName) throws IOException {
return isTableEnabled(TableName.valueOf(tableName));
}
/**
@ -312,7 +375,7 @@ public class HTable implements HTableInterface {
* @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
*/
@Deprecated
public static boolean isTableEnabled(byte[] tableName) throws IOException {
public static boolean isTableEnabled(TableName tableName) throws IOException {
return isTableEnabled(HBaseConfiguration.create(), tableName);
}
@ -327,7 +390,7 @@ public class HTable implements HTableInterface {
@Deprecated
public static boolean isTableEnabled(Configuration conf, String tableName)
throws IOException {
return isTableEnabled(conf, Bytes.toBytes(tableName));
return isTableEnabled(conf, TableName.valueOf(tableName));
}
/**
@ -336,11 +399,25 @@ public class HTable implements HTableInterface {
* @param tableName Name of table to check.
* @return {@code true} if table is online.
* @throws IOException if a remote or network exception occurs
* @deprecated use {@link HBaseAdmin#isTableEnabled(byte[] tableName)}
* @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
*/
@Deprecated
public static boolean isTableEnabled(Configuration conf, byte[] tableName)
throws IOException {
return isTableEnabled(conf, TableName.valueOf(tableName));
}
/**
* Tells whether or not a table is enabled.
* @param conf The Configuration object to use.
* @param tableName Name of table to check.
* @return {@code true} if table is online.
* @throws IOException if a remote or network exception occurs
* @deprecated use {@link HBaseAdmin#isTableEnabled(org.apache.hadoop.hbase.TableName tableName)}
*/
@Deprecated
public static boolean isTableEnabled(Configuration conf,
final byte[] tableName) throws IOException {
final TableName tableName) throws IOException {
return HConnectionManager.execute(new HConnectable<Boolean>(conf) {
@Override
public Boolean connect(HConnection connection) throws IOException {
@ -388,7 +465,12 @@ public class HTable implements HTableInterface {
*/
@Override
public byte [] getTableName() {
return this.tableName;
return this.tableName.getName();
}
@Override
public TableName getName() {
return tableName;
}
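// Sketch contrasting the legacy accessor with the new one (namespace and
// table hypothetical): getTableName() returns the fully qualified bytes,
// getName() the structured pojo.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
class GetNameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable t = new HTable(conf, TableName.valueOf("ns1", "t1"));
    System.out.println(Bytes.toString(t.getTableName()));    // "ns1:t1"
    System.out.println(t.getName().getNamespaceAsString());  // "ns1"
    System.out.println(t.getName().getQualifierAsString());  // "t1"
    t.close();
  }
}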
/**
@ -502,7 +584,7 @@ public class HTable implements HTableInterface {
*/
public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
// TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation.
return MetaScanner.allTableRegions(getConfiguration(), this.connection, getTableName(), false);
return MetaScanner.allTableRegions(getConfiguration(), this.connection, getName(), false);
}
/**
@ -611,7 +693,8 @@ public class HTable implements HTableInterface {
if (scan.getCaching() <= 0) {
scan.setCaching(getScannerCaching());
}
return new ClientScanner(getConfiguration(), scan, getTableName(), this.connection);
return new ClientScanner(getConfiguration(), scan,
getName(), this.connection);
}
/**
@ -641,7 +724,7 @@ public class HTable implements HTableInterface {
@Override
public Result get(final Get get) throws IOException {
RegionServerCallable<Result> callable = new RegionServerCallable<Result>(this.connection,
getTableName(), get.getRow()) {
getName(), get.getRow()) {
public Result call() throws IOException {
return ProtobufUtil.get(getStub(), getLocation().getRegionInfo().getRegionName(), get);
}
@ -813,7 +896,7 @@ public class HTable implements HTableInterface {
if (synchronous || ap.hasError()) {
if (ap.hasError() && LOG.isDebugEnabled()) {
LOG.debug(Bytes.toString(tableName) + ": One or more of the operations have failed -" +
LOG.debug(tableName + ": One or more of the operations have failed -" +
" waiting for all operation in progress to finish (successfully or not)");
}
ap.waitUntilDone();
@ -845,7 +928,7 @@ public class HTable implements HTableInterface {
@Override
public void mutateRow(final RowMutations rm) throws IOException {
RegionServerCallable<Void> callable =
new RegionServerCallable<Void>(connection, getTableName(), rm.getRow()) {
new RegionServerCallable<Void>(connection, getName(), rm.getRow()) {
public Void call() throws IOException {
try {
MultiRequest request = RequestConverter.buildMultiRequest(
@ -870,7 +953,7 @@ public class HTable implements HTableInterface {
"Invalid arguments to append, no columns specified");
}
RegionServerCallable<Result> callable =
new RegionServerCallable<Result>(this.connection, getTableName(), append.getRow()) {
new RegionServerCallable<Result>(this.connection, getName(), append.getRow()) {
public Result call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@ -897,7 +980,7 @@ public class HTable implements HTableInterface {
"Invalid arguments to increment, no columns specified");
}
RegionServerCallable<Result> callable = new RegionServerCallable<Result>(this.connection,
getTableName(), increment.getRow()) {
getName(), increment.getRow()) {
public Result call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@ -944,7 +1027,7 @@ public class HTable implements HTableInterface {
}
RegionServerCallable<Long> callable =
new RegionServerCallable<Long>(connection, getTableName(), row) {
new RegionServerCallable<Long>(connection, getName(), row) {
public Long call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@ -972,7 +1055,7 @@ public class HTable implements HTableInterface {
final Put put)
throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getTableName(), row) {
new RegionServerCallable<Boolean>(connection, getName(), row) {
public Boolean call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@ -998,7 +1081,7 @@ public class HTable implements HTableInterface {
final Delete delete)
throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getTableName(), row) {
new RegionServerCallable<Boolean>(connection, getName(), row) {
public Boolean call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@ -1020,7 +1103,7 @@ public class HTable implements HTableInterface {
@Override
public boolean exists(final Get get) throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getTableName(), get.getRow()) {
new RegionServerCallable<Boolean>(connection, getName(), get.getRow()) {
public Boolean call() throws IOException {
try {
GetRequest request = RequestConverter.buildGetRequest(
@ -1124,7 +1207,7 @@ public class HTable implements HTableInterface {
Callable<List<Boolean>> callable = new Callable<List<Boolean>>() {
public List<Boolean> call() throws Exception {
RegionServerCallable<List<Boolean>> callable =
new RegionServerCallable<List<Boolean>>(connection, getTableName(),
new RegionServerCallable<List<Boolean>>(connection, getName(),
getsByRegionEntry.getValue().get(0).getRow()) {
public List<Boolean> call() throws IOException {
try {
@ -1139,7 +1222,7 @@ public class HTable implements HTableInterface {
}
};
return rpcCallerFactory.<List<Boolean>> newCaller().callWithRetries(callable,
operationTimeout);
operationTimeout);
}
};
futures.put(getsByRegionEntry.getKey(), pool.submit(callable));
@ -1352,6 +1435,12 @@ public class HTable implements HTableInterface {
*/
public static void setRegionCachePrefetch(final byte[] tableName,
final boolean enable) throws IOException {
setRegionCachePrefetch(TableName.valueOf(tableName), enable);
}
public static void setRegionCachePrefetch(
final TableName tableName,
final boolean enable) throws IOException {
HConnectionManager.execute(new HConnectable<Void>(HBaseConfiguration
.create()) {
@Override
@ -1374,6 +1463,12 @@ public class HTable implements HTableInterface {
*/
public static void setRegionCachePrefetch(final Configuration conf,
final byte[] tableName, final boolean enable) throws IOException {
setRegionCachePrefetch(conf, TableName.valueOf(tableName), enable);
}
public static void setRegionCachePrefetch(final Configuration conf,
final TableName tableName,
final boolean enable) throws IOException {
HConnectionManager.execute(new HConnectable<Void>(conf) {
@Override
public Void connect(HConnection connection) throws IOException {
@ -1393,6 +1488,11 @@ public class HTable implements HTableInterface {
*/
public static boolean getRegionCachePrefetch(final Configuration conf,
final byte[] tableName) throws IOException {
return getRegionCachePrefetch(conf, TableName.valueOf(tableName));
}
public static boolean getRegionCachePrefetch(final Configuration conf,
final TableName tableName) throws IOException {
return HConnectionManager.execute(new HConnectable<Boolean>(conf) {
@Override
public Boolean connect(HConnection connection) throws IOException {
@ -1409,6 +1509,11 @@ public class HTable implements HTableInterface {
* @throws IOException
*/
public static boolean getRegionCachePrefetch(final byte[] tableName) throws IOException {
return getRegionCachePrefetch(TableName.valueOf(tableName));
}
public static boolean getRegionCachePrefetch(
final TableName tableName) throws IOException {
return HConnectionManager.execute(new HConnectable<Boolean>(
HBaseConfiguration.create()) {
@Override
@ -1416,7 +1521,7 @@ public class HTable implements HTableInterface {
return connection.getRegionCachePrefetch(tableName);
}
});
}
}
/**
* Explicitly clears the region cache to fetch the latest value from META.

View File

@ -23,6 +23,7 @@ import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@ -49,6 +50,11 @@ public interface HTableInterface extends Closeable {
*/
byte[] getTableName();
/**
* Gets the fully qualified table name instance of this table.
*/
TableName getName();
/**
* Returns the {@link Configuration} object used by this instance.
* <p>

View File

@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@ -67,7 +68,7 @@ public class HTableMultiplexer {
static final String TABLE_MULTIPLEXER_FLUSH_FREQ_MS = "hbase.tablemultiplexer.flush.frequency.ms";
private Map<byte[], HTable> tableNameToHTableMap;
private Map<TableName, HTable> tableNameToHTableMap;
/** The map between each region server to its corresponding buffer queue */
private Map<HRegionLocation, LinkedBlockingQueue<PutStatus>>
@ -92,7 +93,7 @@ public class HTableMultiplexer {
this.serverToBufferQueueMap = new ConcurrentHashMap<HRegionLocation,
LinkedBlockingQueue<PutStatus>>();
this.serverToFlushWorkerMap = new ConcurrentHashMap<HRegionLocation, HTableFlushWorker>();
this.tableNameToHTableMap = new ConcurrentSkipListMap<byte[], HTable>(Bytes.BYTES_COMPARATOR);
this.tableNameToHTableMap = new ConcurrentSkipListMap<TableName, HTable>();
this.retryNum = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
@ -101,24 +102,28 @@ public class HTableMultiplexer {
/**
* The put request will be buffered by its corresponding buffer queue. Return false if the queue
* is already full.
* @param table
* @param tableName
* @param put
* @return true if the request can be accepted by its corresponding buffer queue.
* @throws IOException
*/
public boolean put(final byte[] table, final Put put) throws IOException {
return put(table, put, this.retryNum);
public boolean put(TableName tableName, final Put put) throws IOException {
return put(tableName, put, this.retryNum);
}
public boolean put(byte[] tableName, final Put put) throws IOException {
return put(TableName.valueOf(tableName), put);
}
/**
* The puts request will be buffered by their corresponding buffer queue.
* Return the list of puts which could not be queued.
* @param table
* @param tableName
* @param puts
* @return the list of puts which could not be queued
* @throws IOException
*/
public List<Put> put(final byte[] table, final List<Put> puts)
public List<Put> put(TableName tableName, final List<Put> puts)
throws IOException {
if (puts == null)
return null;
@ -126,7 +131,7 @@ public class HTableMultiplexer {
List <Put> failedPuts = null;
boolean result;
for (Put put : puts) {
result = put(table, put, this.retryNum);
result = put(tableName, put, this.retryNum);
if (!result) {
// Create the failed puts list if necessary
@ -140,24 +145,29 @@ public class HTableMultiplexer {
return failedPuts;
}
public List<Put> put(byte[] tableName, final List<Put> puts) throws IOException {
return put(TableName.valueOf(tableName), puts);
}
/**
* The put request will be buffered by its corresponding buffer queue. And the put request will be
* retried before dropping the request.
* Return false if the queue is already full.
* @param table
* @param tableName
* @param put
* @param retry
* @return true if the request can be accepted by its corresponding buffer queue.
* @throws IOException
*/
public boolean put(final byte[] table, final Put put, int retry)
public boolean put(final TableName tableName, final Put put, int retry)
throws IOException {
if (retry <= 0) {
return false;
}
LinkedBlockingQueue<PutStatus> queue;
HTable htable = getHTable(table);
HTable htable = getHTable(tableName);
try {
htable.validatePut(put);
HRegionLocation loc = htable.getRegionLocation(put.getRow(), false);
@ -175,6 +185,11 @@ public class HTableMultiplexer {
return false;
}
public boolean put(final byte[] tableName, final Put put, int retry)
throws IOException {
return put(TableName.valueOf(tableName), put, retry);
}
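// Queueing sketch for the multiplexer (table "t1" hypothetical, cluster
// assumed reachable): put() returns false instead of blocking when the
// per-region-server buffer queue is full.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
class MultiplexerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTableMultiplexer mux = new HTableMultiplexer(conf, 1000);
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    System.out.println("queued=" + mux.put(TableName.valueOf("t1"), p));
  }
}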
/**
* @return the current HTableMultiplexerStatus
*/
@ -183,14 +198,14 @@ public class HTableMultiplexer {
}
private HTable getHTable(final byte[] table) throws IOException {
HTable htable = this.tableNameToHTableMap.get(table);
private HTable getHTable(TableName tableName) throws IOException {
HTable htable = this.tableNameToHTableMap.get(tableName);
if (htable == null) {
synchronized (this.tableNameToHTableMap) {
htable = this.tableNameToHTableMap.get(table);
htable = this.tableNameToHTableMap.get(tableName);
if (htable == null) {
htable = new HTable(conf, table);
this.tableNameToHTableMap.put(table, htable);
htable = new HTable(conf, tableName);
this.tableNameToHTableMap.put(tableName, htable);
}
}
}
@ -435,7 +450,7 @@ public class HTableMultiplexer {
HRegionLocation oldLoc) throws IOException {
Put failedPut = failedPutStatus.getPut();
// The currentPut is failed. So get the table name for the currentPut.
byte[] tableName = failedPutStatus.getRegionInfo().getTableName();
TableName tableName = failedPutStatus.getRegionInfo().getTableName();
// Decrease the retry count
int retryCount = failedPutStatus.getRetryCount() - 1;

View File

@ -23,6 +23,7 @@ import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@ -340,6 +341,11 @@ public class HTablePool implements Closeable {
return table.getTableName();
}
@Override
public TableName getName() {
return table.getName();
}
@Override
public Configuration getConfiguration() {
checkState();

View File

@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
@ -76,10 +77,10 @@ public class MetaScanner {
* @throws IOException e
*/
public static void metaScan(Configuration configuration, HConnection connection,
MetaScannerVisitor visitor, byte [] userTableName)
MetaScannerVisitor visitor, TableName userTableName)
throws IOException {
metaScan(configuration, connection, visitor, userTableName, null, Integer.MAX_VALUE,
HConstants.META_TABLE_NAME);
TableName.META_TABLE_NAME);
}
/**
@ -98,11 +99,11 @@ public class MetaScanner {
* @throws IOException e
*/
public static void metaScan(Configuration configuration,
MetaScannerVisitor visitor, byte [] userTableName, byte[] row,
MetaScannerVisitor visitor, TableName userTableName, byte[] row,
int rowLimit)
throws IOException {
metaScan(configuration, null, visitor, userTableName, row, rowLimit,
HConstants.META_TABLE_NAME);
TableName.META_TABLE_NAME);
}
/**
@ -123,15 +124,15 @@ public class MetaScanner {
* @throws IOException e
*/
public static void metaScan(Configuration configuration, HConnection connection,
final MetaScannerVisitor visitor, final byte[] tableName,
final byte[] row, final int rowLimit, final byte[] metaTableName)
final MetaScannerVisitor visitor, final TableName tableName,
final byte[] row, final int rowLimit, final TableName metaTableName)
throws IOException {
int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
HTable metaTable;
if (connection == null) {
metaTable = new HTable(configuration, HConstants.META_TABLE_NAME, null);
metaTable = new HTable(configuration, TableName.META_TABLE_NAME, null);
} else {
metaTable = new HTable(HConstants.META_TABLE_NAME, connection, null);
metaTable = new HTable(TableName.META_TABLE_NAME, connection, null);
}
// Calculate startrow for scan.
byte[] startRow;
@ -142,17 +143,18 @@ public class MetaScanner {
byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY);
if (startRowResult == null) {
throw new TableNotFoundException("Cannot find row in .META. for table: " +
Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
throw new TableNotFoundException("Cannot find row in "+ TableName
.META_TABLE_NAME.getNameAsString()+" for table: "
+ tableName + ", row=" + Bytes.toStringBinary(searchRow));
}
HRegionInfo regionInfo = getHRegionInfo(startRowResult);
if (regionInfo == null) {
throw new IOException("HRegionInfo was null or empty in Meta for " +
Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
tableName + ", row=" + Bytes.toStringBinary(searchRow));
}
byte[] rowBefore = regionInfo.getStartKey();
startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false);
} else if (tableName == null || tableName.length == 0) {
} else if (tableName == null || tableName.getName().length == 0) {
// Full META scan
startRow = HConstants.EMPTY_START_ROW;
} else {
@ -165,7 +167,7 @@ public class MetaScanner {
HConstants.DEFAULT_HBASE_META_SCANNER_CACHING));
scan.setCaching(rows);
if (LOG.isTraceEnabled()) {
LOG.trace("Scanning " + Bytes.toString(metaTableName) + " starting at row=" +
LOG.trace("Scanning " + metaTableName.getNameAsString() + " starting at row=" +
Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows);
}
// Run the scan
@ -267,11 +269,11 @@ public class MetaScanner {
* @throws IOException
*/
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Configuration conf,
HConnection connection,
final byte [] tablename, final boolean offlined) throws IOException {
HConnection connection, final TableName tableName,
final boolean offlined) throws IOException {
final NavigableMap<HRegionInfo, ServerName> regions =
new TreeMap<HRegionInfo, ServerName>();
MetaScannerVisitor visitor = new TableMetaScannerVisitor(tablename) {
MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
@Override
public boolean processRowInternal(Result rowResult) throws IOException {
HRegionInfo info = getHRegionInfo(rowResult);
@ -280,7 +282,7 @@ public class MetaScanner {
return true;
}
};
metaScan(conf, connection, visitor, tablename);
metaScan(conf, connection, visitor, tableName);
return regions;
}
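// Usage sketch for the visitor-driven scan above (conn open, table name
// hypothetical): collect every region of one table with its server.
import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.MetaScanner;
class AllTableRegionsSketch {
  static void dump(Configuration conf, HConnection conn) throws IOException {
    NavigableMap<HRegionInfo, ServerName> regions =
        MetaScanner.allTableRegions(conf, conn, TableName.valueOf("t1"), false);
    for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
      System.out.println(e.getKey().getRegionNameAsString() + " -> " + e.getValue());
    }
  }
}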
@ -340,9 +342,9 @@ public class MetaScanner {
* META entries for daughters are available during splits.
*/
public static abstract class TableMetaScannerVisitor extends DefaultMetaScannerVisitor {
private byte[] tableName;
private TableName tableName;
public TableMetaScannerVisitor(byte[] tableName) {
public TableMetaScannerVisitor(TableName tableName) {
super();
this.tableName = tableName;
}
@ -353,7 +355,7 @@ public class MetaScanner {
if (info == null) {
return true;
}
if (!(Bytes.equals(info.getTableName(), tableName))) {
if (!(info.getTableName().equals(tableName))) {
return false;
}
return super.processRow(rowResult);

View File

@ -23,6 +23,7 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@ -42,7 +43,7 @@ import com.google.protobuf.ServiceException;
class MultiServerCallable<R> extends RegionServerCallable<MultiResponse> {
private final MultiAction<R> multi;
MultiServerCallable(final HConnection connection, final byte [] tableName,
MultiServerCallable(final HConnection connection, final TableName tableName,
final HRegionLocation location, final MultiAction<R> multi) {
super(connection, tableName, null);
this.multi = multi;
@ -120,4 +121,4 @@ class MultiServerCallable<R> extends RegionServerCallable<MultiResponse> {
// Use the location we were given in the constructor rather than go look it up.
setStub(getConnection().getClient(getLocation().getServerName()));
}
}
}

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.util.Bytes;
@ -44,7 +45,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
// Public because used outside of this package over in ipc.
static final Log LOG = LogFactory.getLog(RegionServerCallable.class);
private final HConnection connection;
private final byte [] tableName;
private final TableName tableName;
private final byte [] row;
private HRegionLocation location;
private ClientService.BlockingInterface stub;
@ -56,7 +57,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
* @param tableName Table name to which <code>row</code> belongs.
* @param row The row we want in <code>tableName</code>.
*/
public RegionServerCallable(HConnection connection, byte [] tableName, byte [] row) {
public RegionServerCallable(HConnection connection, TableName tableName, byte [] row) {
this.connection = connection;
this.tableName = tableName;
this.row = row;
@ -71,7 +72,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
public void prepare(final boolean reload) throws IOException {
this.location = connection.getRegionLocation(tableName, row, reload);
if (this.location == null) {
throw new IOException("Failed to find location, tableName=" + Bytes.toString(tableName) +
throw new IOException("Failed to find location, tableName=" + tableName +
", row=" + Bytes.toString(row) + ", reload=" + reload);
}
setStub(getConnection().getClient(getLocation().getServerName()));
@ -100,7 +101,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
this.location = location;
}
public byte [] getTableName() {
public TableName getTableName() {
return this.tableName;
}
@ -129,7 +130,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
@Override
public String getExceptionMessageAdditionalDetail() {
return "row '" + Bytes.toString(row) + "' on table '" + Bytes.toString(tableName);
return "row '" + Bytes.toString(row) + "' on table '" + tableName;
}
@Override

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionLocation;
/**
@ -46,7 +47,7 @@ interface Registry {
* @param enabled Return true if table is enabled
* @throws IOException
*/
boolean isTableOnlineState(byte [] tableName, boolean enabled) throws IOException;
boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException;
/**
* @return Count of 'running' regionservers

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
@ -89,7 +90,7 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
* @param scanMetrics the ScanMetrics to use; if it is null, ScannerCallable
* won't collect metrics
*/
public ScannerCallable (HConnection connection, byte [] tableName, Scan scan,
public ScannerCallable (HConnection connection, TableName tableName, Scan scan,
ScanMetrics scanMetrics) {
super(connection, tableName, scan.getStartRow());
this.scan = scan;

View File

@ -40,7 +40,7 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor {
* @param desc
*/
UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues());
super(desc.getTableName(), getUnmodifyableFamilies(desc), desc.getValues());
}

View File

@ -21,10 +21,10 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly;
@ -95,15 +95,14 @@ class ZooKeeperRegistry implements Registry {
}
@Override
public boolean isTableOnlineState(byte [] tableName, boolean enabled)
public boolean isTableOnlineState(TableName tableName, boolean enabled)
throws IOException {
String tableNameStr = Bytes.toString(tableName);
ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
try {
if (enabled) {
return ZKTableReadOnly.isEnabledTable(zkw, tableNameStr);
return ZKTableReadOnly.isEnabledTable(zkw, tableName);
}
return ZKTableReadOnly.isDisabledTable(zkw, tableNameStr);
return ZKTableReadOnly.isDisabledTable(zkw, tableName);
} catch (KeeperException e) {
throw new IOException("Enable/Disable failed", e);
} finally {

View File

@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
@ -99,7 +100,7 @@ public class AggregationClient {
* & propagated to it.
*/
public <R, S, P extends Message, Q extends Message, T extends Message> R max(
final byte[] tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
HTable table = null;
try {
@ -188,7 +189,7 @@ public class AggregationClient {
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> R min(
final byte[] tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
HTable table = null;
try {
@ -268,7 +269,7 @@ public class AggregationClient {
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
final byte[] tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
HTable table = null;
try {
@ -342,7 +343,7 @@ public class AggregationClient {
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
final byte[] tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
HTable table = null;
try {
@ -415,7 +416,7 @@ public class AggregationClient {
* @throws Throwable
*/
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
final byte[] tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
HTable table = null;
try {
@ -498,7 +499,7 @@ public class AggregationClient {
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
double avg(final byte[] tableName,
double avg(final TableName tableName,
final ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
Pair<S, Long> p = getAvgArgs(tableName, ci, scan);
return ci.divideForAvg(p.getFirst(), p.getSecond());
@ -606,7 +607,7 @@ public class AggregationClient {
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
double std(final byte[] tableName, ColumnInterpreter<R, S, P, Q, T> ci,
double std(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
Scan scan) throws Throwable {
HTable table = null;
try {
@ -719,7 +720,7 @@ public class AggregationClient {
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
R median(final byte[] tableName, ColumnInterpreter<R, S, P, Q, T> ci,
R median(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
Scan scan) throws Throwable {
HTable table = null;
try {
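
As a usage sketch under the new signatures, assuming an existing Configuration conf; table, family, and qualifier names are hypothetical, and checked-exception handling is elided:

    // Sketch: aggregation calls now take a TableName instead of byte[].
    AggregationClient aggClient = new AggregationClient(conf);
    TableName tableName = TableName.valueOf("testtable"); // hypothetical
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
    long rows = aggClient.rowCount(tableName, new LongColumnInterpreter(), scan);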

View File

@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.client.coprocessor;
import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos;
import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
@ -45,7 +47,7 @@ public class SecureBulkLoadClient {
this.table = table;
}
public String prepareBulkLoad(final byte[] tableName) throws IOException {
public String prepareBulkLoad(final TableName tableName) throws IOException {
try {
return
table.coprocessorService(SecureBulkLoadProtos.SecureBulkLoadService.class,
@ -61,7 +63,7 @@ public class SecureBulkLoadClient {
SecureBulkLoadProtos.PrepareBulkLoadRequest request =
SecureBulkLoadProtos.PrepareBulkLoadRequest.newBuilder()
.setTableName(com.google.protobuf.ByteString.copyFrom(tableName)).build();
.setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
instance.prepareBulkLoad(controller,
request,
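
A usage sketch, assuming the client wraps an already-open HTable as the constructor's this.table assignment suggests (the table name is hypothetical):

    // Sketch: prepareBulkLoad now takes the target TableName.
    SecureBulkLoadClient loadClient = new SecureBulkLoadClient(table);
    String bulkToken = loadClient.prepareBulkLoad(TableName.valueOf("testtable"));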

View File

@ -23,6 +23,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.RegionServerCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
@ -48,13 +49,13 @@ public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
private static Log LOG = LogFactory.getLog(RegionCoprocessorRpcChannel.class);
private final HConnection connection;
private final byte[] table;
private final TableName table;
private final byte[] row;
private byte[] lastRegion;
private RpcRetryingCallerFactory rpcFactory;
public RegionCoprocessorRpcChannel(HConnection conn, byte[] table, byte[] row) {
public RegionCoprocessorRpcChannel(HConnection conn, TableName table, byte[] row) {
this.connection = conn;
this.table = table;
this.row = row;

View File

@ -40,6 +40,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
@ -1563,7 +1565,7 @@ public final class ProtobufUtil {
* @return the converted Permission
*/
public static Permission toPermission(AccessControlProtos.Permission proto) {
if (proto.hasTable()) {
if (proto.hasTableName()) {
return toTablePermission(proto);
} else {
List<Permission.Action> actions = toPermissionActions(proto.getActionList());
@ -1582,9 +1584,9 @@ public final class ProtobufUtil {
byte[] qualifier = null;
byte[] family = null;
byte[] table = null;
TableName table = null;
if (proto.hasTable()) table = proto.getTable().toByteArray();
if (proto.hasTableName()) table = ProtobufUtil.toTableName(proto.getTableName());
if (proto.hasFamily()) family = proto.getFamily().toByteArray();
if (proto.hasQualifier()) qualifier = proto.getQualifier().toByteArray();
@ -1603,7 +1605,7 @@ public final class ProtobufUtil {
if (perm instanceof TablePermission) {
TablePermission tablePerm = (TablePermission)perm;
if (tablePerm.hasTable()) {
builder.setTable(ByteString.copyFrom(tablePerm.getTable()));
builder.setTableName(ProtobufUtil.toProtoTableName(tablePerm.getTable()));
}
if (tablePerm.hasFamily()) {
builder.setFamily(ByteString.copyFrom(tablePerm.getFamily()));
@ -1692,7 +1694,7 @@ public final class ProtobufUtil {
permissionBuilder.addAction(toPermissionAction(a));
}
if (perm.hasTable()) {
permissionBuilder.setTable(ByteString.copyFrom(perm.getTable()));
permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(perm.getTable()));
}
if (perm.hasFamily()) {
permissionBuilder.setFamily(ByteString.copyFrom(perm.getFamily()));
@ -1719,9 +1721,9 @@ public final class ProtobufUtil {
byte[] qualifier = null;
byte[] family = null;
byte[] table = null;
TableName table = null;
if (permission.hasTable()) table = permission.getTable().toByteArray();
if (permission.hasTableName()) table = ProtobufUtil.toTableName(permission.getTableName());
if (permission.hasFamily()) family = permission.getFamily().toByteArray();
if (permission.hasQualifier()) qualifier = permission.getQualifier().toByteArray();
@ -1762,14 +1764,14 @@ public final class ProtobufUtil {
*
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user to grant permissions
* @param t optional table name
* @param tableName optional table name
* @param f optional column family
* @param q optional qualifier
* @param actions the permissions to be granted
* @throws ServiceException
*/
public static void grant(AccessControlService.BlockingInterface protocol,
String userShortName, byte[] t, byte[] f, byte[] q,
String userShortName, TableName tableName, byte[] f, byte[] q,
Permission.Action... actions) throws ServiceException {
List<AccessControlProtos.Permission.Action> permActions =
Lists.newArrayListWithCapacity(actions.length);
@ -1777,7 +1779,7 @@ public final class ProtobufUtil {
permActions.add(ProtobufUtil.toPermissionAction(a));
}
AccessControlProtos.GrantRequest request = RequestConverter.
buildGrantRequest(userShortName, t, f, q, permActions.toArray(
buildGrantRequest(userShortName, tableName, f, q, permActions.toArray(
new AccessControlProtos.Permission.Action[actions.length]));
protocol.grant(null, request);
}
@ -1791,14 +1793,14 @@ public final class ProtobufUtil {
*
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user to revoke permissions
* @param t optional table name
* @param tableName optional table name
* @param f optional column family
* @param q optional qualifier
* @param actions the permissions to be revoked
* @throws ServiceException
*/
public static void revoke(AccessControlService.BlockingInterface protocol,
String userShortName, byte[] t, byte[] f, byte[] q,
String userShortName, TableName tableName, byte[] f, byte[] q,
Permission.Action... actions) throws ServiceException {
List<AccessControlProtos.Permission.Action> permActions =
Lists.newArrayListWithCapacity(actions.length);
@ -1806,7 +1808,7 @@ public final class ProtobufUtil {
permActions.add(ProtobufUtil.toPermissionAction(a));
}
AccessControlProtos.RevokeRequest request = RequestConverter.
buildRevokeRequest(userShortName, t, f, q, permActions.toArray(
buildRevokeRequest(userShortName, tableName, f, q, permActions.toArray(
new AccessControlProtos.Permission.Action[actions.length]));
protocol.revoke(null, request);
}
@ -1822,11 +1824,11 @@ public final class ProtobufUtil {
*/
public static List<UserPermission> getUserPermissions(
AccessControlService.BlockingInterface protocol,
byte[] t) throws ServiceException {
TableName t) throws ServiceException {
AccessControlProtos.UserPermissionsRequest.Builder builder =
AccessControlProtos.UserPermissionsRequest.newBuilder();
if (t != null) {
builder.setTable(ByteString.copyFrom(t));
builder.setTableName(ProtobufUtil.toProtoTableName(t));
}
AccessControlProtos.UserPermissionsRequest request = builder.build();
AccessControlProtos.UserPermissionsResponse response =
@ -1988,6 +1990,28 @@ public final class ProtobufUtil {
cell.getValue().toByteArray());
}
public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) {
HBaseProtos.NamespaceDescriptor.Builder b =
HBaseProtos.NamespaceDescriptor.newBuilder()
.setName(ByteString.copyFromUtf8(ns.getName()));
for(Map.Entry<String, String> entry: ns.getConfiguration().entrySet()) {
b.addConfiguration(HBaseProtos.NameStringPair.newBuilder()
.setName(entry.getKey())
.setValue(entry.getValue()));
}
return b.build();
}
public static NamespaceDescriptor toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor desc) throws IOException {
NamespaceDescriptor.Builder b =
NamespaceDescriptor.create(desc.getName().toStringUtf8());
for(HBaseProtos.NameStringPair prop : desc.getConfigurationList()) {
b.addConfiguration(prop.getName(), prop.getValue());
}
return b.build();
}
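
A round-trip sketch for the two namespace converters; the namespace name and configuration key are hypothetical, and IOException handling is elided:

    // Sketch: NamespaceDescriptor <-> protobuf round trip.
    NamespaceDescriptor ns = NamespaceDescriptor.create("ns1")
        .addConfiguration("some.key", "some.value") // hypothetical property
        .build();
    HBaseProtos.NamespaceDescriptor proto = ProtobufUtil.toProtoNamespaceDescriptor(ns);
    NamespaceDescriptor roundTripped = ProtobufUtil.toNamespaceDescriptor(proto);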
/**
* Get an instance of the argument type declared in a class's signature. The
* argument type is assumed to be a PB Message subclass, and the instance is
@ -2029,7 +2053,7 @@ public final class ProtobufUtil {
// input / output paths are relative to the store dir
// store dir is relative to region dir
CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
.setTableName(ByteString.copyFrom(info.getTableName()))
.setTableName(ByteString.copyFrom(info.getTableName().getName()))
.setEncodedRegionName(ByteString.copyFrom(info.getEncodedNameAsBytes()))
.setFamilyName(ByteString.copyFrom(family))
.setStoreHomeDir(storeDir.getName()); //make relative
@ -2077,4 +2101,15 @@ public final class ProtobufUtil {
return "row=" + Bytes.toString(proto.getRow().toByteArray()) +
", type=" + proto.getMutateType().toString();
}
public static TableName toTableName(HBaseProtos.TableName tableNamePB) {
return TableName.valueOf(tableNamePB.getNamespace().toByteArray(),
tableNamePB.getQualifier().toByteArray());
}
public static HBaseProtos.TableName toProtoTableName(TableName tableName) {
return HBaseProtos.TableName.newBuilder()
.setNamespace(ByteString.copyFrom(tableName.getNamespace()))
.setQualifier(ByteString.copyFrom(tableName.getQualifier())).build();
}
}
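
The two TableName converters are inverses of each other; a quick sketch with hypothetical names:

    // Sketch: TableName <-> protobuf round trip preserves namespace and qualifier.
    TableName tableName = TableName.valueOf(Bytes.toBytes("ns1"), Bytes.toBytes("t1"));
    HBaseProtos.TableName proto = ProtobufUtil.toProtoTableName(tableName);
    assert tableName.equals(ProtobufUtil.toTableName(proto));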

View File

@ -22,6 +22,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@ -904,9 +905,9 @@ public final class RequestConverter {
* @return an AddColumnRequest
*/
public static AddColumnRequest buildAddColumnRequest(
final byte [] tableName, final HColumnDescriptor column) {
final TableName tableName, final HColumnDescriptor column) {
AddColumnRequest.Builder builder = AddColumnRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnFamilies(column.convert());
return builder.build();
}
@ -919,9 +920,9 @@ public final class RequestConverter {
* @return a DeleteColumnRequest
*/
public static DeleteColumnRequest buildDeleteColumnRequest(
final byte [] tableName, final byte [] columnName) {
final TableName tableName, final byte [] columnName) {
DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnName(ByteString.copyFrom(columnName));
return builder.build();
}
@ -934,9 +935,9 @@ public final class RequestConverter {
* @return an ModifyColumnRequest
*/
public static ModifyColumnRequest buildModifyColumnRequest(
final byte [] tableName, final HColumnDescriptor column) {
final TableName tableName, final HColumnDescriptor column) {
ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnFamilies(column.convert());
return builder.build();
}
@ -1019,9 +1020,9 @@ public final class RequestConverter {
* @param tableName
* @return a DeleteTableRequest
*/
public static DeleteTableRequest buildDeleteTableRequest(final byte [] tableName) {
public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName) {
DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
return builder.build();
}
@ -1031,9 +1032,9 @@ public final class RequestConverter {
* @param tableName
* @return an EnableTableRequest
*/
public static EnableTableRequest buildEnableTableRequest(final byte [] tableName) {
public static EnableTableRequest buildEnableTableRequest(final TableName tableName) {
EnableTableRequest.Builder builder = EnableTableRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
return builder.build();
}
@ -1043,9 +1044,9 @@ public final class RequestConverter {
* @param tableName
* @return a DisableTableRequest
*/
public static DisableTableRequest buildDisableTableRequest(final byte [] tableName) {
public static DisableTableRequest buildDisableTableRequest(final TableName tableName) {
DisableTableRequest.Builder builder = DisableTableRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
return builder.build();
}
@ -1077,9 +1078,9 @@ public final class RequestConverter {
* @return a ModifyTableRequest
*/
public static ModifyTableRequest buildModifyTableRequest(
final byte [] table, final HTableDescriptor hTableDesc) {
final TableName tableName, final HTableDescriptor hTableDesc) {
ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(table));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setTableSchema(hTableDesc.convert());
return builder.build();
}
@ -1091,9 +1092,9 @@ public final class RequestConverter {
* @return a GetSchemaAlterStatusRequest
*/
public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest(
final byte [] tableName) {
final TableName tableName) {
GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(tableName));
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
return builder.build();
}
@ -1104,11 +1105,11 @@ public final class RequestConverter {
* @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(
final List<String> tableNames) {
final List<TableName> tableNames) {
GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder();
if (tableNames != null) {
for (String str : tableNames) {
builder.addTableNames(str);
for (TableName tableName : tableNames) {
builder.addTableNames(ProtobufUtil.toProtoTableName(tableName));
}
}
return builder.build();
@ -1121,9 +1122,9 @@ public final class RequestConverter {
* @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(
final byte[] tableName) {
final TableName tableName) {
return GetTableDescriptorsRequest.newBuilder()
.addTableNames(Bytes.toString(tableName))
.addTableNames(ProtobufUtil.toProtoTableName(tableName))
.build();
}
@ -1204,22 +1205,22 @@ public final class RequestConverter {
* Create a request to grant user permissions.
*
* @param username the short user name who to grant permissions
* @param table optional table name the permissions apply
* @param tableName optional table name the permissions apply
* @param family optional column family
* @param qualifier optional qualifier
* @param actions the permissions to be granted
* @return A {@link AccessControlProtos} GrantRequest
*/
public static AccessControlProtos.GrantRequest buildGrantRequest(
String username, byte[] table, byte[] family, byte[] qualifier,
String username, TableName tableName, byte[] family, byte[] qualifier,
AccessControlProtos.Permission.Action... actions) {
AccessControlProtos.Permission.Builder permissionBuilder =
AccessControlProtos.Permission.newBuilder();
for (AccessControlProtos.Permission.Action a : actions) {
permissionBuilder.addAction(a);
}
if (table != null) {
permissionBuilder.setTable(ByteString.copyFrom(table));
if (tableName != null) {
permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
if (family != null) {
permissionBuilder.setFamily(ByteString.copyFrom(family));
@ -1240,22 +1241,22 @@ public final class RequestConverter {
* Create a request to revoke user permissions.
*
* @param username the short user name whose permissions to be revoked
* @param table optional table name the permissions apply
* @param tableName optional table name the permissions apply
* @param family optional column family
* @param qualifier optional qualifier
* @param actions the permissions to be revoked
* @return A {@link AccessControlProtos} RevokeRequest
*/
public static AccessControlProtos.RevokeRequest buildRevokeRequest(
String username, byte[] table, byte[] family, byte[] qualifier,
String username, TableName tableName, byte[] family, byte[] qualifier,
AccessControlProtos.Permission.Action... actions) {
AccessControlProtos.Permission.Builder permissionBuilder =
AccessControlProtos.Permission.newBuilder();
for (AccessControlProtos.Permission.Action a : actions) {
permissionBuilder.addAction(a);
}
if (table != null) {
permissionBuilder.setTable(ByteString.copyFrom(table));
if (tableName != null) {
permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
if (family != null) {
permissionBuilder.setFamily(ByteString.copyFrom(family));
@ -1290,4 +1291,4 @@ public final class RequestConverter {
}
return builder.build();
}
}
}
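
A sketch of the converted builders in use (the table name is hypothetical):

    // Sketch: master admin requests are now built from a TableName.
    TableName tableName = TableName.valueOf("t1"); // hypothetical
    DeleteTableRequest deleteReq = RequestConverter.buildDeleteTableRequest(tableName);
    EnableTableRequest enableReq = RequestConverter.buildEnableTableRequest(tableName);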

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.security.access;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
@ -36,7 +37,7 @@ import java.io.IOException;
public class TablePermission extends Permission {
private static Log LOG = LogFactory.getLog(TablePermission.class);
private byte[] table;
private TableName table;
private byte[] family;
private byte[] qualifier;
@ -52,7 +53,7 @@ public class TablePermission extends Permission {
* @param family the family, can be null if a global permission on the table
* @param assigned the list of allowed actions
*/
public TablePermission(byte[] table, byte[] family, Action... assigned) {
public TablePermission(TableName table, byte[] family, Action... assigned) {
this(table, family, null, assigned);
}
@ -63,7 +64,7 @@ public class TablePermission extends Permission {
* @param family the family, can be null if a global permission on the table
* @param assigned the list of allowed actions
*/
public TablePermission(byte[] table, byte[] family, byte[] qualifier,
public TablePermission(TableName table, byte[] family, byte[] qualifier,
Action... assigned) {
super(assigned);
this.table = table;
@ -78,7 +79,7 @@ public class TablePermission extends Permission {
* @param family the family, can be null if a global permission on the table
* @param actionCodes the list of allowed action codes
*/
public TablePermission(byte[] table, byte[] family, byte[] qualifier,
public TablePermission(TableName table, byte[] family, byte[] qualifier,
byte[] actionCodes) {
super(actionCodes);
this.table = table;
@ -90,7 +91,7 @@ public class TablePermission extends Permission {
return table != null;
}
public byte[] getTable() {
public TableName getTable() {
return table;
}
@ -123,9 +124,9 @@ public class TablePermission extends Permission {
* @return <code>true</code> if the action within the given scope is allowed
* by this permission, <code>false</code>
*/
public boolean implies(byte[] table, byte[] family, byte[] qualifier,
public boolean implies(TableName table, byte[] family, byte[] qualifier,
Action action) {
if (!Bytes.equals(this.table, table)) {
if (!this.table.equals(table)) {
return false;
}
@ -154,8 +155,8 @@ public class TablePermission extends Permission {
* @return <code>true</code> if the action is allowed over the given scope
* by this permission, otherwise <code>false</code>
*/
public boolean implies(byte[] table, KeyValue kv, Action action) {
if (!Bytes.equals(this.table, table)) {
public boolean implies(TableName table, KeyValue kv, Action action) {
if (!this.table.equals(table)) {
return false;
}
@ -183,8 +184,8 @@ public class TablePermission extends Permission {
* column-qualifier specific permission, for example, implies() would still
* return false.
*/
public boolean matchesFamily(byte[] table, byte[] family, Action action) {
if (!Bytes.equals(this.table, table)) {
public boolean matchesFamily(TableName table, byte[] family, Action action) {
if (!this.table.equals(table)) {
return false;
}
@ -208,7 +209,7 @@ public class TablePermission extends Permission {
* @return <code>true</code> if the table, family and qualifier match,
* otherwise <code>false</code>
*/
public boolean matchesFamilyQualifier(byte[] table, byte[] family, byte[] qualifier,
public boolean matchesFamilyQualifier(TableName table, byte[] family, byte[] qualifier,
Action action) {
if (!matchesFamily(table, family, action)) {
return false;
@ -229,7 +230,7 @@ public class TablePermission extends Permission {
}
TablePermission other = (TablePermission)obj;
if (!(Bytes.equals(table, other.getTable()) &&
if (!(table.equals(other.getTable()) &&
((family == null && other.getFamily() == null) ||
Bytes.equals(family, other.getFamily())) &&
((qualifier == null && other.getQualifier() == null) ||
@ -247,7 +248,7 @@ public class TablePermission extends Permission {
final int prime = 37;
int result = super.hashCode();
if (table != null) {
result = prime * result + Bytes.hashCode(table);
result = prime * result + table.hashCode();
}
if (family != null) {
result = prime * result + Bytes.hashCode(family);
@ -260,7 +261,7 @@ public class TablePermission extends Permission {
public String toString() {
StringBuilder str = new StringBuilder("[TablePermission: ")
.append("table=").append(Bytes.toString(table))
.append("table=").append(table)
.append(", family=").append(Bytes.toString(family))
.append(", qualifier=").append(Bytes.toString(qualifier))
.append(", actions=");
@ -282,7 +283,8 @@ public class TablePermission extends Permission {
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
table = Bytes.readByteArray(in);
byte[] tableBytes = Bytes.readByteArray(in);
table = TableName.valueOf(tableBytes);
if (in.readBoolean()) {
family = Bytes.readByteArray(in);
}
@ -294,7 +296,7 @@ public class TablePermission extends Permission {
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
Bytes.writeByteArray(out, table);
Bytes.writeByteArray(out, table.getName());
out.writeBoolean(family != null);
if (family != null) {
Bytes.writeByteArray(out, family);
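
A sketch of the TableName-scoped permission checks; table, family, and qualifier names are hypothetical:

    // Sketch: construct a READ permission and test it against a cell coordinate.
    TableName tableName = TableName.valueOf("t1"); // hypothetical
    TablePermission perm =
        new TablePermission(tableName, Bytes.toBytes("f"), Permission.Action.READ);
    boolean allowed = perm.implies(tableName, Bytes.toBytes("f"), Bytes.toBytes("q"),
        Permission.Action.READ);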

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.security.access;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.DataInput;
@ -69,7 +70,7 @@ public class UserPermission extends TablePermission {
* table
* @param assigned the list of allowed actions
*/
public UserPermission(byte[] user, byte[] table, byte[] family,
public UserPermission(byte[] user, TableName table, byte[] family,
Action... assigned) {
super(table, family, assigned);
this.user = user;
@ -86,7 +87,7 @@ public class UserPermission extends TablePermission {
* over the entire column family
* @param assigned the list of allowed actions
*/
public UserPermission(byte[] user, byte[] table, byte[] family,
public UserPermission(byte[] user, TableName table, byte[] family,
byte[] qualifier, Action... assigned) {
super(table, family, qualifier, assigned);
this.user = user;
@ -103,7 +104,7 @@ public class UserPermission extends TablePermission {
* over the entire column family
* @param actionCodes the list of allowed action codes
*/
public UserPermission(byte[] user, byte[] table, byte[] family,
public UserPermission(byte[] user, TableName table, byte[] family,
byte[] qualifier, byte[] actionCodes) {
super(table, family, qualifier, actionCodes);
this.user = user;
@ -117,8 +118,8 @@ public class UserPermission extends TablePermission {
* Returns true if this permission describes a global user permission.
*/
public boolean isGlobal() {
byte[] tableName = getTable();
return(tableName == null || tableName.length == 0);
TableName tableName = getTable();
return(tableName == null);
}
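
A sketch of the new global check; the user name is hypothetical, and the cast picks the TableName overload:

    // Sketch: a null TableName now marks a global permission.
    UserPermission globalAdmin = new UserPermission(Bytes.toBytes("admin_user"),
        (TableName) null, null, Permission.Action.ADMIN);
    assert globalAdmin.isGlobal();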
@Override

View File

@ -20,7 +20,9 @@
package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
@ -38,15 +40,16 @@ public class ClientSnapshotDescriptionUtils {
*/
public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
throws IllegalArgumentException {
// FIXME these method names is really bad - trunk will probably change
// .META. and -ROOT- snapshots are not allowed
if (HTableDescriptor.isMetaTable(Bytes.toBytes(snapshot.getTable()))) {
throw new IllegalArgumentException(".META. and -ROOT- snapshots are not allowed");
}
// make sure the snapshot name is valid
HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getName()));
// make sure the table name is valid
HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getTable()));
TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()));
if (snapshot.hasTable()) {
// make sure the table name is valid; TableName.valueOf below implicitly checks it
TableName tableName = TableName.valueOf(snapshot.getTable());
if (HTableDescriptor.isSystemTable(tableName)) {
throw new IllegalArgumentException("System table snapshots are not allowed");
}
}
}
/**
@ -60,7 +63,8 @@ public class ClientSnapshotDescriptionUtils {
if (ssd == null) {
return null;
}
return "{ ss=" + ssd.getName() + " table=" + ssd.getTable()
+ " type=" + ssd.getType() + " }";
return "{ ss=" + ssd.getName() +
" table=" + (ssd.hasTable()?TableName.valueOf(ssd.getTable()):"") +
" type=" + ssd.getType() + " }";
}
}
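
A sketch of the relaxed validation: user-table snapshots pass, while system tables are rejected (names hypothetical):

    // Sketch: valid for a user table; a system table would throw IllegalArgumentException.
    HBaseProtos.SnapshotDescription sd = HBaseProtos.SnapshotDescription.newBuilder()
        .setName("snap1").setTable("t1").build(); // hypothetical names
    ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(sd);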

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
@ -45,7 +46,14 @@ public class TablePartiallyOpenException extends IOException {
/**
* @param tableName Name of table that is partially open
*/
public TablePartiallyOpenException(byte[] tableName) {
this(Bytes.toString(tableName));
public TablePartiallyOpenException(TableName tableName) {
this(tableName.getNameAsString());
}
/**
* @param tableName Name of table that is partially open
*/
public TablePartiallyOpenException(byte[] tableName) {
this(Bytes.toString(tableName));
}
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.zookeeper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.zookeeper.KeeperException;
@ -56,8 +57,8 @@ public class ZKTable {
* for every query. Synchronize access rather than use concurrent Map because
* synchronization needs to span query of zk.
*/
private final Map<String, ZooKeeperProtos.Table.State> cache =
new HashMap<String, ZooKeeperProtos.Table.State>();
private final Map<TableName, ZooKeeperProtos.Table.State> cache =
new HashMap<TableName, ZooKeeperProtos.Table.State>();
// TODO: Make it so always a table znode. Put table schema here as well as table state.
// Have watcher on table znode so all are notified of state or schema change.
@ -78,8 +79,9 @@ public class ZKTable {
List<String> children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
if (children == null) return;
for (String child: children) {
ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(this.watcher, child);
if (state != null) this.cache.put(child, state);
TableName tableName = TableName.valueOf(child);
ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(this.watcher, tableName);
if (state != null) this.cache.put(tableName, state);
}
}
}
@ -90,7 +92,7 @@ public class ZKTable {
* @param tableName
* @throws KeeperException unexpected zookeeper exception
*/
public void setDisabledTable(String tableName)
public void setDisabledTable(TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (!isDisablingOrDisabledTable(tableName)) {
@ -107,7 +109,7 @@ public class ZKTable {
* @param tableName
* @throws KeeperException unexpected zookeeper exception
*/
public void setDisablingTable(final String tableName)
public void setDisablingTable(final TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (!isEnabledOrDisablingTable(tableName)) {
@ -124,7 +126,7 @@ public class ZKTable {
* @param tableName
* @throws KeeperException unexpected zookeeper exception
*/
public void setEnablingTable(final String tableName)
public void setEnablingTable(final TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (!isDisabledOrEnablingTable(tableName)) {
@ -142,7 +144,7 @@ public class ZKTable {
* @return if the operation succeeds or not
* @throws KeeperException unexpected zookeeper exception
*/
public boolean checkAndSetEnablingTable(final String tableName)
public boolean checkAndSetEnablingTable(final TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (isEnablingTable(tableName)) {
@ -160,7 +162,7 @@ public class ZKTable {
* @return if the operation succeeds or not
* @throws KeeperException unexpected zookeeper exception
*/
public boolean checkDisabledAndSetEnablingTable(final String tableName)
public boolean checkDisabledAndSetEnablingTable(final TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (!isDisabledTable(tableName)) {
@ -178,7 +180,7 @@ public class ZKTable {
* @return if the operation succeeds or not
* @throws KeeperException unexpected zookeeper exception
*/
public boolean checkEnabledAndSetDisablingTable(final String tableName)
public boolean checkEnabledAndSetDisablingTable(final TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (this.cache.get(tableName) != null && !isEnabledTable(tableName)) {
@ -189,9 +191,9 @@ public class ZKTable {
}
}
private void setTableState(final String tableName, final ZooKeeperProtos.Table.State state)
private void setTableState(final TableName tableName, final ZooKeeperProtos.Table.State state)
throws KeeperException {
String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName);
String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString());
if (ZKUtil.checkExists(this.watcher, znode) == -1) {
ZKUtil.createAndFailSilent(this.watcher, znode);
}
@ -204,41 +206,41 @@ public class ZKTable {
}
}
public boolean isDisabledTable(final String tableName) {
public boolean isDisabledTable(final TableName tableName) {
return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED);
}
public boolean isDisablingTable(final String tableName) {
public boolean isDisablingTable(final TableName tableName) {
return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLING);
}
public boolean isEnablingTable(final String tableName) {
public boolean isEnablingTable(final TableName tableName) {
return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLING);
}
public boolean isEnabledTable(String tableName) {
public boolean isEnabledTable(TableName tableName) {
return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
}
public boolean isDisablingOrDisabledTable(final String tableName) {
public boolean isDisablingOrDisabledTable(final TableName tableName) {
synchronized (this.cache) {
return isDisablingTable(tableName) || isDisabledTable(tableName);
}
}
public boolean isEnabledOrDisablingTable(final String tableName) {
public boolean isEnabledOrDisablingTable(final TableName tableName) {
synchronized (this.cache) {
return isEnabledTable(tableName) || isDisablingTable(tableName);
}
}
public boolean isDisabledOrEnablingTable(final String tableName) {
public boolean isDisabledOrEnablingTable(final TableName tableName) {
synchronized (this.cache) {
return isDisabledTable(tableName) || isEnablingTable(tableName);
}
}
private boolean isTableState(final String tableName, final ZooKeeperProtos.Table.State state) {
private boolean isTableState(final TableName tableName, final ZooKeeperProtos.Table.State state) {
synchronized (this.cache) {
ZooKeeperProtos.Table.State currentState = this.cache.get(tableName);
return ZKTableReadOnly.isTableState(currentState, state);
@ -251,7 +253,7 @@ public class ZKTable {
* @param tableName
* @throws KeeperException unexpected zookeeper exception
*/
public void setDeletedTable(final String tableName)
public void setDeletedTable(final TableName tableName)
throws KeeperException {
synchronized (this.cache) {
if (this.cache.remove(tableName) == null) {
@ -259,7 +261,7 @@ public class ZKTable {
"already deleted");
}
ZKUtil.deleteNodeFailSilent(this.watcher,
ZKUtil.joinZNode(this.watcher.tableZNode, tableName));
ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
}
}
@ -270,7 +272,7 @@ public class ZKTable {
* @param tableName
* @throws KeeperException
*/
public void setEnabledTable(final String tableName) throws KeeperException {
public void setEnabledTable(final TableName tableName) throws KeeperException {
setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
}
@ -280,7 +282,7 @@ public class ZKTable {
* @param tableName
* @return true if the table is present
*/
public boolean isTablePresent(final String tableName) {
public boolean isTablePresent(final TableName tableName) {
synchronized (this.cache) {
ZooKeeperProtos.Table.State state = this.cache.get(tableName);
return !(state == null);
@ -291,11 +293,11 @@ public class ZKTable {
* Gets a list of all the tables set as disabled in zookeeper.
* @return Set of disabled tables, empty Set if none
*/
public Set<String> getDisabledTables() {
Set<String> disabledTables = new HashSet<String>();
public Set<TableName> getDisabledTables() {
Set<TableName> disabledTables = new HashSet<TableName>();
synchronized (this.cache) {
Set<String> tables = this.cache.keySet();
for (String table: tables) {
Set<TableName> tables = this.cache.keySet();
for (TableName table: tables) {
if (isDisabledTable(table)) disabledTables.add(table);
}
}
@ -307,7 +309,7 @@ public class ZKTable {
* @return Set of disabled tables, empty Set if none
* @throws KeeperException
*/
public static Set<String> getDisabledTables(ZooKeeperWatcher zkw)
public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
throws KeeperException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED);
}
@ -317,7 +319,7 @@ public class ZKTable {
* @return Set of disabling tables, empty Set if none
* @throws KeeperException
*/
public static Set<String> getDisablingTables(ZooKeeperWatcher zkw)
public static Set<TableName> getDisablingTables(ZooKeeperWatcher zkw)
throws KeeperException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLING);
}
@ -327,7 +329,7 @@ public class ZKTable {
* @return Set of enabling tables, empty Set if none
* @throws KeeperException
*/
public static Set<String> getEnablingTables(ZooKeeperWatcher zkw)
public static Set<TableName> getEnablingTables(ZooKeeperWatcher zkw)
throws KeeperException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.ENABLING);
}
@ -337,7 +339,7 @@ public class ZKTable {
* @return Set of disabled tables, empty Set if none
* @throws KeeperException
*/
public static Set<String> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
throws KeeperException {
return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED,
ZooKeeperProtos.Table.State.DISABLING);
@ -352,14 +354,14 @@ public class ZKTable {
* @param deleteZNode
* @throws KeeperException
*/
public void removeEnablingTable(final String tableName, boolean deleteZNode)
public void removeEnablingTable(final TableName tableName, boolean deleteZNode)
throws KeeperException {
synchronized (this.cache) {
if (isEnablingTable(tableName)) {
this.cache.remove(tableName);
if (deleteZNode) {
ZKUtil.deleteNodeFailSilent(this.watcher,
ZKUtil.joinZNode(this.watcher.tableZNode, tableName));
ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()));
}
}
}
@ -371,17 +373,18 @@ public class ZKTable {
* @return Set of tables of specified states, empty Set if none
* @throws KeeperException
*/
static Set<String> getAllTables(final ZooKeeperWatcher zkw,
static Set<TableName> getAllTables(final ZooKeeperWatcher zkw,
final ZooKeeperProtos.Table.State... states) throws KeeperException {
Set<String> allTables = new HashSet<String>();
Set<TableName> allTables = new HashSet<TableName>();
List<String> children =
ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
if(children == null) return allTables;
for (String child: children) {
ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, child);
TableName tableName = TableName.valueOf(child);
ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, tableName);
for (ZooKeeperProtos.Table.State expectedState: states) {
if (state == expectedState) {
allTables.add(child);
allTables.add(tableName);
break;
}
}
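
A usage sketch, assuming an existing ZooKeeperWatcher watcher; the table name is hypothetical and KeeperException handling is elided:

    // Sketch: ZK table-state tracking is keyed by TableName.
    ZKTable zkTable = new ZKTable(watcher);
    TableName tableName = TableName.valueOf("t1"); // hypothetical
    zkTable.setEnabledTable(tableName);
    boolean enabled = zkTable.isEnabledTable(tableName);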

View File

@ -20,6 +20,7 @@
package org.apache.hadoop.hbase.zookeeper;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@ -50,7 +51,7 @@ public class ZKTableReadOnly {
* @throws KeeperException
*/
public static boolean isDisabledTable(final ZooKeeperWatcher zkw,
final String tableName)
final TableName tableName)
throws KeeperException {
ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
return isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
@ -66,7 +67,7 @@ public class ZKTableReadOnly {
* @throws KeeperException
*/
public static boolean isEnabledTable(final ZooKeeperWatcher zkw,
final String tableName)
final TableName tableName)
throws KeeperException {
return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED;
}
@ -82,7 +83,7 @@ public class ZKTableReadOnly {
* @throws KeeperException
*/
public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw,
final String tableName)
final TableName tableName)
throws KeeperException {
ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) ||
@ -94,14 +95,16 @@ public class ZKTableReadOnly {
* @return Set of disabled tables, empty Set if none
* @throws KeeperException
*/
public static Set<String> getDisabledTables(ZooKeeperWatcher zkw)
public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
throws KeeperException {
Set<String> disabledTables = new HashSet<String>();
Set<TableName> disabledTables = new HashSet<TableName>();
List<String> children =
ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
for (String child: children) {
ZooKeeperProtos.Table.State state = getTableState(zkw, child);
if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(child);
TableName tableName = TableName.valueOf(child);
ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName);
}
return disabledTables;
}
@ -111,16 +114,18 @@ public class ZKTableReadOnly {
* @return Set of disabled tables, empty Set if none
* @throws KeeperException
*/
public static Set<String> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
throws KeeperException {
Set<String> disabledTables = new HashSet<String>();
Set<TableName> disabledTables = new HashSet<TableName>();
List<String> children =
ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
for (String child: children) {
ZooKeeperProtos.Table.State state = getTableState(zkw, child);
TableName tableName = TableName.valueOf(child);
ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
if (state == ZooKeeperProtos.Table.State.DISABLED ||
state == ZooKeeperProtos.Table.State.DISABLING)
disabledTables.add(child);
disabledTables.add(tableName);
}
return disabledTables;
}
@ -132,14 +137,14 @@ public class ZKTableReadOnly {
/**
* @param zkw
* @param child
* @param tableName
* @return Null or {@link ZooKeeperProtos.Table.State} found in znode.
* @throws KeeperException
*/
static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
final String child)
final TableName tableName)
throws KeeperException {
String znode = ZKUtil.joinZNode(zkw.tableZNode, child);
String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
byte [] data = ZKUtil.getData(zkw, znode);
if (data == null || data.length <= 0) return null;
try {
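
A sketch of the read-only queries, assuming an existing ZooKeeperWatcher zkw (KeeperException handling elided):

    // Sketch: state queries now return TableName sets.
    Set<TableName> disabled = ZKTableReadOnly.getDisabledTables(zkw);
    for (TableName tn : disabled) {
      System.out.println("disabled: " + tn.getNameAsString());
    }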

View File

@ -109,6 +109,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
public String tableLockZNode;
// znode containing the state of recovering regions
public String recoveringRegionsZNode;
// znode containing namespace descriptors
public static String namespaceZNode = "namespace";
// Certain ZooKeeper nodes need to be world-readable
public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
@ -231,7 +234,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
tableLockZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.tableLock", "table-lock"));
recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
namespaceZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.namespace", "namespace"));
}
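
The znode name is configurable; a sketch of overriding it (the value is illustrative):

    // Sketch: the default is "namespace", resolving to "<baseZNode>/namespace",
    // e.g. /hbase/namespace.
    Configuration conf = HBaseConfiguration.create();
    conf.set("zookeeper.znode.namespace", "my-namespace"); // hypothetical override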
/**

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
@ -47,16 +48,17 @@ import java.util.concurrent.atomic.AtomicInteger;
@Category(MediumTests.class)
public class TestAsyncProcess {
private static final byte[] DUMMY_TABLE = "DUMMY_TABLE".getBytes();
private static final TableName DUMMY_TABLE =
TableName.valueOf("DUMMY_TABLE");
private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes();
private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes();
private static final byte[] FAILS = "FAILS".getBytes();
private static final Configuration conf = new Configuration();
private static ServerName sn = new ServerName("localhost:10,1254");
private static HRegionInfo hri1 = new HRegionInfo(DUMMY_BYTES_1);
private static HRegionInfo hri2 = new HRegionInfo(DUMMY_BYTES_1);
private static HRegionInfo hri1 = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2);
private static HRegionInfo hri2 =
new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW);
private static HRegionLocation loc1 = new HRegionLocation(hri1, sn);
private static HRegionLocation loc2 = new HRegionLocation(hri2, sn);
@ -118,7 +120,8 @@ public class TestAsyncProcess {
}
@Override
protected <R> AsyncProcess createAsyncProcess(byte[] tableName, ExecutorService pool,
protected <R> AsyncProcess createAsyncProcess(TableName tableName,
ExecutorService pool,
AsyncProcess.AsyncProcessCallback<R> callback,
Configuration conf) {
ap = new MyAsyncProcess<R>(this, callback, conf);
@ -126,7 +129,7 @@ public class TestAsyncProcess {
}
@Override
public HRegionLocation locateRegion(final byte[] tableName,
public HRegionLocation locateRegion(final TableName tableName,
final byte[] row) {
return loc1;
}
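
The mock setup above relies on the stricter HRegionInfo constructors; a standalone sketch with hypothetical names:

    // Sketch: HRegionInfo now requires a TableName plus an explicit key range.
    HRegionInfo info = new HRegionInfo(TableName.valueOf("t1"),
        Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));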

View File

@ -26,6 +26,7 @@ import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -82,7 +83,7 @@ public class TestClientNoCluster {
}
@Override
public boolean isTableOnlineState(byte[] tableName, boolean enabled)
public boolean isTableOnlineState(TableName tableName, boolean enabled)
throws IOException {
return enabled;
}
@ -103,7 +104,7 @@ public class TestClientNoCluster {
Configuration localConfig = HBaseConfiguration.create(this.conf);
// This override mocks up our exists/get call to throw a RegionServerStoppedException.
localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName());
HTable table = new HTable(localConfig, HConstants.META_TABLE_NAME);
HTable table = new HTable(localConfig, TableName.META_TABLE_NAME);
Throwable t = null;
LOG.info("Start");
try {
@ -140,7 +141,7 @@ public class TestClientNoCluster {
// and it has expired. Otherwise, if this functionality is broke, all retries will be run --
// all ten of them -- and we'll get the RetriesExhaustedException exception.
localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1);
HTable table = new HTable(localConfig, HConstants.META_TABLE_NAME);
HTable table = new HTable(localConfig, TableName.META_TABLE_NAME);
Throwable t = null;
try {
// An exists call turns into a get w/ a flag.
@ -172,7 +173,7 @@ public class TestClientNoCluster {
// Go against meta else we will try to find first region for the table on construction which
// means we'll have to do a bunch more mocking. Tests that go against meta only should be
// good for a bit of testing.
HTable table = new HTable(this.conf, HConstants.META_TABLE_NAME);
HTable table = new HTable(this.conf, TableName.META_TABLE_NAME);
ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY);
try {
Result result = null;
@ -192,7 +193,7 @@ public class TestClientNoCluster {
// Go against meta else we will try to find first region for the table on construction which
// means we'll have to do a bunch more mocking. Tests that go against meta only should be
// good for a bit of testing.
HTable table = new HTable(this.conf, HConstants.META_TABLE_NAME);
HTable table = new HTable(this.conf, TableName.META_TABLE_NAME);
ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY);
try {
Result result = null;
@ -295,4 +296,4 @@ public class TestClientNoCluster {
return this.stub;
}
}
}
}

View File

@ -22,12 +22,16 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
@ -100,7 +104,7 @@ public class TestSnapshotFromAdmin {
// setup the admin and run the test
HBaseAdmin admin = new HBaseAdmin(mockConnection);
String snapshot = "snapshot";
String table = "table";
TableName table = TableName.valueOf("table");
// get start time
long start = System.currentTimeMillis();
admin.snapshot(snapshot, table);
@ -128,6 +132,7 @@ public class TestSnapshotFromAdmin {
failSnapshotStart(admin, builder.setName("-snapshot").build());
failSnapshotStart(admin, builder.setName("snapshot fails").build());
failSnapshotStart(admin, builder.setName("snap$hot").build());
failSnapshotStart(admin, builder.setName("snap:hot").build());
// check the table name also get verified
failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
@ -144,7 +149,7 @@ public class TestSnapshotFromAdmin {
IsSnapshotDoneResponse doneResponse = IsSnapshotDoneResponse.newBuilder().setDone(true).build();
Mockito.when(
master.isSnapshotDone((RpcController) Mockito.isNull(),
Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse);
Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse);
// make sure that we can use valid names
admin.snapshot(builder.setName("snapshot").setTable("table").build());

View File

@ -38,9 +38,11 @@ import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Public
@InterfaceStability.Stable
public final class HConstants {
//Bytes.UTF8_ENCODING should be updated if this changes
/** When we encode strings, we always specify UTF8 encoding */
public static final String UTF8_ENCODING = "UTF-8";
//Bytes.UTF8_CHARSET should be updated if this changes
/** When we encode strings, we always specify UTF8 encoding */
public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
/**
@ -104,9 +106,10 @@ public final class HConstants {
* Version 5 changes versions in catalog table regions.
* Version 6 enables blockcaching on catalog tables.
* Version 7 introduces hfile -- hbase 0.19 to 0.20..
* Version 8 introduces namespaces
*/
// public static final String FILE_SYSTEM_VERSION = "6";
public static final String FILE_SYSTEM_VERSION = "7";
public static final String FILE_SYSTEM_VERSION = "8";
// Configuration parameters
@ -349,11 +352,7 @@ public final class HConstants {
// be the first to be reassigned if the server(s) they are being served by
// should go down.
/** The root table's name.*/
public static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
/** The META table's name. */
public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
public static final String BASE_NAMESPACE_DIR = ".data";
/** delimiter used between portions of a region name */
public static final int META_ROW_DELIMITER = ',';
@ -826,12 +825,12 @@ public final class HConstants {
Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME,
HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME,
HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY,
OLD_SNAPSHOT_DIR_NAME }));
OLD_SNAPSHOT_DIR_NAME, BASE_NAMESPACE_DIR}));
/** Directories that are not HBase user table directories */
public static final List<String> HBASE_NON_USER_TABLE_DIRS =
Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
new String[] { Bytes.toString(META_TABLE_NAME), Bytes.toString(ROOT_TABLE_NAME) },
new String[] { TableName.META_TABLE_NAME.getNameAsString(), TableName.ROOT_TABLE_NAME.getNameAsString() },
HBASE_NON_TABLE_DIRS.toArray())));
/** Health script related settings. */
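With the raw byte[] constants gone, the catalog tables are now resolved through the TableName class added later in this patch. A minimal sketch of how a call site migrates; the class name below is illustrative, not part of the commit:
import org.apache.hadoop.hbase.TableName;
public class MetaNameMigration {
  public static void main(String[] args) {
    // Before this commit: byte[] meta = HConstants.META_TABLE_NAME; // ".META."
    // After it, the catalog tables live in the 'hbase' system namespace:
    TableName meta = TableName.META_TABLE_NAME;
    System.out.println(meta.getNameAsString());      // hbase:meta
    System.out.println(meta.getNamespaceAsString()); // hbase
    System.out.println(meta.getQualifierAsString()); // meta
  }
}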

View File

@ -68,6 +68,10 @@ import com.google.common.primitives.Longs;
@InterfaceStability.Evolving
public class KeyValue implements Cell, HeapSize, Cloneable {
static final Log LOG = LogFactory.getLog(KeyValue.class);
private static final int META_LENGTH =
TableName.META_TABLE_NAME.getName().length; // 'hbase:meta' length
// TODO: Group Key-only comparators and operations into a Key class, just
// for neatness' sake, if we can figure out what to call it.
@ -123,11 +127,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
* @param tableName The table name.
* @return The comparator.
*/
public static KeyComparator getRowComparator(byte [] tableName) {
if(Bytes.equals(HConstants.ROOT_TABLE_NAME,tableName)) {
public static KeyComparator getRowComparator(TableName tableName) {
if(TableName.ROOT_TABLE_NAME.equals(tableName)) {
return ROOT_COMPARATOR.getRawComparator();
}
if(Bytes.equals(HConstants.META_TABLE_NAME, tableName)) {
if(TableName.META_TABLE_NAME.equals(tableName)) {
return META_COMPARATOR.getRawComparator();
}
return COMPARATOR.getRawComparator();
@ -2399,14 +2403,13 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
// Rows look like this: .META.,ROW_FROM_META,RID
// LOG.info("ROOT " + Bytes.toString(left, loffset, llength) +
// "---" + Bytes.toString(right, roffset, rlength));
final int metalength = 7; // '.META.' length
int lmetaOffsetPlusDelimiter = loffset + metalength;
int lmetaOffsetPlusDelimiter = loffset + META_LENGTH + 1;
int leftFarDelimiter = getDelimiterInReverse(left,
lmetaOffsetPlusDelimiter,
llength - metalength, HConstants.DELIMITER);
int rmetaOffsetPlusDelimiter = roffset + metalength;
llength - META_LENGTH - 1, HConstants.DELIMITER);
int rmetaOffsetPlusDelimiter = roffset + META_LENGTH + 1;
int rightFarDelimiter = getDelimiterInReverse(right,
rmetaOffsetPlusDelimiter, rlength - metalength,
rmetaOffsetPlusDelimiter, rlength - META_LENGTH - 1,
HConstants.DELIMITER);
if (leftFarDelimiter < 0 && rightFarDelimiter >= 0) {
// Nothing between .META. and regionid. Its first key.
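The hunk above replaces the hard-coded 7 (the 6 bytes of '.META.' plus the trailing ',') with META_LENGTH + 1, since the meta table's name is now the 10-byte 'hbase:meta'. A small worked sketch of the offset arithmetic, using only standard Java; the row contents and class name are illustrative:
public class MetaOffsetExample {
  public static void main(String[] args) {
    // A root-table row looks like: <meta name>,<row from meta>,<region id>
    byte[] row = "hbase:meta,usertable,xyz,1234,4321".getBytes();
    int metaLength = "hbase:meta".length(); // 10; previously hard-coded as 7 for ".META." plus ','
    // Skip past the meta name and the ',' after it, then search the rest
    // (in reverse) for the delimiter in front of the region id:
    int offsetPlusDelimiter = 0 + metaLength + 1; // loffset + META_LENGTH + 1
    int remaining = row.length - metaLength - 1;  // llength - META_LENGTH - 1
    System.out.println("skip " + offsetPlusDelimiter + " bytes, search " + remaining);
  }
}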

View File

@ -0,0 +1,202 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
/**
* Namespace POJO class. Used to represent and define namespaces.
*
* Descriptors will be persisted in an HBase table.
* This works since namespaces are essentially metadata of a group of tables
* as opposed to a more tangible container.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class NamespaceDescriptor {
/** System namespace name. */
public static final byte [] SYSTEM_NAMESPACE_NAME = Bytes.toBytes("hbase");
public static final String SYSTEM_NAMESPACE_NAME_STR =
Bytes.toString(SYSTEM_NAMESPACE_NAME);
/** Default namespace name. */
public static final byte [] DEFAULT_NAMESPACE_NAME = Bytes.toBytes("default");
public static final String DEFAULT_NAMESPACE_NAME_STR =
Bytes.toString(DEFAULT_NAMESPACE_NAME);
public static final NamespaceDescriptor DEFAULT_NAMESPACE = NamespaceDescriptor.create(
DEFAULT_NAMESPACE_NAME_STR).build();
public static final NamespaceDescriptor SYSTEM_NAMESPACE = NamespaceDescriptor.create(
SYSTEM_NAMESPACE_NAME_STR).build();
public final static Set<String> RESERVED_NAMESPACES;
static {
Set<String> set = new HashSet<String>();
set.add(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
set.add(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
RESERVED_NAMESPACES = Collections.unmodifiableSet(set);
}
public final static Set<byte[]> RESERVED_NAMESPACES_BYTES;
static {
Set<byte[]> set = new TreeSet<byte[]>(Bytes.BYTES_RAWCOMPARATOR);
for(String name: RESERVED_NAMESPACES) {
set.add(Bytes.toBytes(name));
}
RESERVED_NAMESPACES_BYTES = Collections.unmodifiableSet(set);
}
private String name;
private Map<String, String> configuration;
public static final Comparator<NamespaceDescriptor> NAMESPACE_DESCRIPTOR_COMPARATOR =
new Comparator<NamespaceDescriptor>() {
@Override
public int compare(NamespaceDescriptor namespaceDescriptor,
NamespaceDescriptor namespaceDescriptor2) {
return namespaceDescriptor.getName().compareTo(namespaceDescriptor2.getName());
}
};
private NamespaceDescriptor() {
}
private NamespaceDescriptor(String name) {
this.name = name;
}
public String getName() {
return name;
}
/**
* Getter for accessing the configuration value by key
*/
public String getConfigurationValue(String key) {
return configuration.get(key);
}
/**
* Getter for fetching an unmodifiable {@link #configuration} map.
*/
public Map<String, String> getConfiguration() {
// shallow pointer copy
return Collections.unmodifiableMap(configuration);
}
/**
* Setter for storing a configuration setting in {@link #configuration} map.
* @param key Config key. Same as XML config key e.g. hbase.something.or.other.
* @param value String value. If null, removes the setting.
*/
public void setConfiguration(String key, String value) {
if (value == null) {
removeConfiguration(key);
} else {
configuration.put(key, value);
}
}
/**
* Remove a config setting represented by the key from the {@link #configuration} map
*/
public void removeConfiguration(final String key) {
configuration.remove(key);
}
@Override
public String toString() {
StringBuilder s = new StringBuilder();
s.append('{');
s.append(HConstants.NAME);
s.append(" => '");
s.append(name);
s.append("'");
for (Map.Entry<String, String> e : configuration.entrySet()) {
String key = e.getKey();
String value = e.getValue();
if (key == null) {
continue;
}
s.append(", ");
s.append(key);
s.append(" => '");
s.append(value);
s.append("'");
}
s.append('}');
return s.toString();
}
public static Builder create(String name) {
return new Builder(name);
}
public static Builder create(NamespaceDescriptor ns) {
return new Builder(ns);
}
public static class Builder {
private String bName;
private Map<String, String> bConfiguration = new TreeMap<String, String>();
private Builder(NamespaceDescriptor ns) {
this.bName = ns.name;
this.bConfiguration = ns.configuration;
}
private Builder(String name) {
this.bName = name;
}
public Builder addConfiguration(Map<String, String> configuration) {
this.bConfiguration.putAll(configuration);
return this;
}
public Builder addConfiguration(String key, String value) {
this.bConfiguration.put(key, value);
return this;
}
public Builder removeConfiguration(String key) {
this.bConfiguration.remove(key);
return this;
}
public NamespaceDescriptor build() {
if (this.bName == null){
throw new IllegalArgumentException("A name has to be specified in a namespace.");
}
NamespaceDescriptor desc = new NamespaceDescriptor(this.bName);
desc.configuration = this.bConfiguration;
return desc;
}
}
}
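A brief usage sketch of the builder API defined above; the namespace name and configuration key are illustrative, not part of the commit:
import org.apache.hadoop.hbase.NamespaceDescriptor;
public class NamespaceDescriptorExample {
  public static void main(String[] args) {
    // Build a descriptor for a hypothetical 'analytics' namespace carrying
    // one configuration property.
    NamespaceDescriptor ns = NamespaceDescriptor.create("analytics")
        .addConfiguration("hbase.example.property", "value1")
        .build();
    // Prints roughly: {NAME => 'analytics', hbase.example.property => 'value1'}
    System.out.println(ns);
  }
}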

View File

@ -0,0 +1,306 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Immutable POJO class for representing a table name.
* A table name is of the form:
* &lt;table namespace&gt;:&lt;table qualifier&gt;
*
* Two special namespaces:
*
* 1. hbase - system namespace, used to contain hbase internal tables
* 2. default - tables with no explicitly specified namespace
* automatically fall into this namespace.
*
* i.e.
*
* a) foo:bar, means namespace=foo and qualifier=bar
* b) bar, means namespace=default and qualifier=bar
* c) default:bar, means namespace=default and qualifier=bar
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class TableName implements Comparable<TableName> {
/** Namespace delimiter */
//this should always be only 1 byte long
public final static char NAMESPACE_DELIM = ':';
// A non-capture group so that this can be embedded.
// the regex is a bit more complicated to support the nuance
// of tables in the default namespace
//Allows only letters, digits and '_'
public static final String VALID_NAMESPACE_REGEX =
"(?:[a-zA-Z_0-9]+)";
//Allows only letters, digits, '_', '-' and '.'
public static final String VALID_TABLE_QUALIFIER_REGEX =
"(?:[a-zA-Z_0-9][a-zA-Z_0-9-.]*)";
//Concatenation of NAMESPACE_REGEX and TABLE_QUALIFIER_REGEX,
//with NAMESPACE_DELIM as delimiter
public static final String VALID_USER_TABLE_REGEX =
"(?:(?:(?:"+VALID_NAMESPACE_REGEX+"\\"+NAMESPACE_DELIM+")?)" +
"(?:"+VALID_TABLE_QUALIFIER_REGEX+"))";
/** The root table's name.*/
public static final TableName ROOT_TABLE_NAME =
valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "root");
/** The META table's name. */
public static final TableName META_TABLE_NAME =
valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
/** The Namespace table's name. */
public static final TableName NAMESPACE_TABLE_NAME =
valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
private static final String OLD_META_STR = ".META.";
private static final String OLD_ROOT_STR = "-ROOT-";
private byte[] name;
private String nameAsString;
private byte[] namespace;
private String namespaceAsString;
private byte[] qualifier;
private String qualifierAsString;
private TableName() {}
/**
* Check that the passed byte array, "tableName", is a legal user-space table name.
* @return Returns passed <code>tableName</code> param
* @throws IllegalArgumentException if the passed tableName is null or
* is made of other than 'word' characters or underscores: i.e.
* <code>[a-zA-Z_0-9.-:]</code>. The ':' is used to delimit the namespace
* from the table qualifier and can be used for nothing else.
*
* Namespace names can only contain 'word' characters
* <code>[a-zA-Z_0-9]</code>
*
* Qualifier names can only contain 'word' characters
* <code>[a-zA-Z_0-9]</code> plus '.' and '-'.
* The name may not start with '.' or '-'.
*
* Valid fully qualified table names:
* foo:bar, namespace=>foo, table=>bar
* org:foo.bar, namespace=>org, table=>foo.bar
*/
public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) {
if (tableName == null || tableName.length <= 0) {
throw new IllegalArgumentException("Name is null or empty");
}
int namespaceDelimIndex = com.google.common.primitives.Bytes.lastIndexOf(tableName,
(byte) NAMESPACE_DELIM);
if (namespaceDelimIndex == 0 || namespaceDelimIndex == -1){
isLegalTableQualifierName(tableName);
} else {
isLegalNamespaceName(tableName, 0, namespaceDelimIndex);
isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length);
}
return tableName;
}
public static void isLegalTableQualifierName(final byte[] qualifierName){
isLegalTableQualifierName(qualifierName, 0, qualifierName.length);
}
/**
* Qualifier names can only contain 'word' characters
* <code>[a-zA-Z_0-9]</code> plus '.' and '-'.
* The name may not start with '.' or '-'.
*
* @param qualifierName byte array containing the qualifier name
* @param start start index
* @param end end index (exclusive)
*/
public static void isLegalTableQualifierName(final byte[] qualifierName,
int start,
int end){
if(end - start < 1) {
throw new IllegalArgumentException("Table qualifier must not be empty");
}
if (qualifierName[start] == '.' || qualifierName[start] == '-') {
throw new IllegalArgumentException("Illegal first character <" + qualifierName[0] +
"> at 0. Namespaces can only start with alphanumeric " +
"characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(qualifierName));
}
for (int i = start; i < end; i++) {
if (Character.isLetterOrDigit(qualifierName[i]) ||
qualifierName[i] == '_' ||
qualifierName[i] == '-' ||
qualifierName[i] == '.') {
continue;
}
throw new IllegalArgumentException("Illegal character <" + qualifierName[i] +
"> at " + i + ". User-space table qualifiers can only contain " +
"'alphanumeric characters': i.e. [a-zA-Z_0-9-.]: " +
Bytes.toString(qualifierName, start, end));
}
}
public static void isLegalNamespaceName(byte[] namespaceName) {
isLegalNamespaceName(namespaceName, 0, namespaceName.length);
}
/**
* Valid namespace characters are [a-zA-Z_0-9]
* @param namespaceName byte array containing the namespace name
* @param offset start index
* @param length end index (exclusive)
*/
public static void isLegalNamespaceName(byte[] namespaceName, int offset, int length) {
for (int i = offset; i < length; i++) {
if (Character.isLetterOrDigit(namespaceName[i])|| namespaceName[i] == '_') {
continue;
}
throw new IllegalArgumentException("Illegal character <" + namespaceName[i] +
"> at " + i + ". Namespaces can only contain " +
"'alphanumeric characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(namespaceName,
offset, length));
}
}
public byte[] getName() {
return name;
}
public String getNameAsString() {
return nameAsString;
}
public byte[] getNamespace() {
return namespace;
}
public String getNamespaceAsString() {
return namespaceAsString;
}
public byte[] getQualifier() {
return qualifier;
}
public String getQualifierAsString() {
return qualifierAsString;
}
public byte[] toBytes() {
return name;
}
@Override
public String toString() {
return nameAsString;
}
public static TableName valueOf(byte[] namespace, byte[] qualifier) {
TableName ret = new TableName();
if(namespace == null || namespace.length < 1) {
namespace = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME;
}
ret.namespace = namespace;
ret.namespaceAsString = Bytes.toString(namespace);
ret.qualifier = qualifier;
ret.qualifierAsString = Bytes.toString(qualifier);
finishValueOf(ret);
return ret;
}
public static TableName valueOf(String namespaceAsString, String qualifierAsString) {
TableName ret = new TableName();
if(namespaceAsString == null || namespaceAsString.length() < 1) {
namespaceAsString = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
}
ret.namespaceAsString = namespaceAsString;
ret.namespace = Bytes.toBytes(namespaceAsString);
ret.qualifier = Bytes.toBytes(qualifierAsString);
ret.qualifierAsString = qualifierAsString;
finishValueOf(ret);
return ret;
}
private static void finishValueOf(TableName tableName) {
isLegalNamespaceName(tableName.namespace);
isLegalTableQualifierName(tableName.qualifier);
tableName.nameAsString =
createFullyQualified(tableName.namespaceAsString, tableName.qualifierAsString);
tableName.name = Bytes.toBytes(tableName.nameAsString);
}
public static TableName valueOf(byte[] name) {
return valueOf(Bytes.toString(name));
}
public static TableName valueOf(String name) {
if(name.equals(OLD_ROOT_STR)) {
throw new IllegalArgumentException(OLD_ROOT_STR + " has been deprecated.");
}
if(name.equals(OLD_META_STR)) {
throw new IllegalArgumentException(OLD_META_STR + " no longer exists. The table has been " +
"renamed to "+META_TABLE_NAME);
}
isLegalFullyQualifiedTableName(Bytes.toBytes(name));
int index = name.indexOf(NAMESPACE_DELIM);
if (index != -1) {
return TableName.valueOf(name.substring(0, index), name.substring(index + 1));
}
return TableName.valueOf(NamespaceDescriptor.DEFAULT_NAMESPACE.getName(), name);
}
private static String createFullyQualified(String namespace, String tableQualifier) {
if (namespace.equals(NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) {
return tableQualifier;
}
return namespace+NAMESPACE_DELIM+tableQualifier;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TableName tableName = (TableName) o;
if (!nameAsString.equals(tableName.nameAsString)) return false;
return true;
}
@Override
public int hashCode() {
int result = nameAsString.hashCode();
return result;
}
@Override
public int compareTo(TableName tableName) {
return this.nameAsString.compareTo(tableName.getNameAsString());
}
}
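A short sketch of how the valueOf factories above resolve the three naming forms from the class comment; the expected output in comments follows the code as written, and the class name is illustrative:
import org.apache.hadoop.hbase.TableName;
public class TableNameExample {
  public static void main(String[] args) {
    // Explicit namespace:
    TableName a = TableName.valueOf("foo:bar");
    System.out.println(a.getNamespaceAsString()); // foo
    System.out.println(a.getQualifierAsString()); // bar
    // No namespace: falls into 'default', which is elided in the printed form.
    TableName b = TableName.valueOf("bar");
    System.out.println(b.getNameAsString());      // bar
    System.out.println(b.getNamespaceAsString()); // default
    // 'default:bar' and 'bar' resolve to the same table name.
    System.out.println(b.equals(TableName.valueOf("default:bar"))); // true
  }
}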

View File

@ -29,6 +29,7 @@ import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.Charset;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.SecureRandom;
@ -42,7 +43,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
@ -61,6 +61,16 @@ import com.google.common.collect.Lists;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Bytes {
//HConstants.UTF8_ENCODING should be updated if this changes
/** When we encode strings, we always specify UTF8 encoding */
private static final String UTF8_ENCODING = "UTF-8";
//HConstants.UTF8_CHARSET should be updated if this changes
/** When we encode strings, we always specify UTF8 encoding */
private static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
//HConstants.EMPTY_BYTE_ARRAY should be updated if this changes
private static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
private static final Log LOG = LogFactory.getLog(Bytes.class);
@ -341,7 +351,7 @@ public class Bytes {
if (len == 0) {
return "";
}
return new String(b, off, len, HConstants.UTF8_CHARSET);
return new String(b, off, len, UTF8_CHARSET);
}
/**
@ -463,7 +473,7 @@ public class Bytes {
* @return the byte array
*/
public static byte[] toBytes(String s) {
return s.getBytes(HConstants.UTF8_CHARSET);
return s.getBytes(UTF8_CHARSET);
}
/**
@ -1295,7 +1305,7 @@ public class Bytes {
* @return New array that has a in lower half and b in upper half.
*/
public static byte [] add(final byte [] a, final byte [] b) {
return add(a, b, HConstants.EMPTY_BYTE_ARRAY);
return add(a, b, EMPTY_BYTE_ARRAY);
}
/**

View File

@ -130,12 +130,16 @@ public class TestKeyValue extends TestCase {
public void testMoreComparisons() throws Exception {
// Root compares
long now = System.currentTimeMillis();
KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now);
KeyValue b = new KeyValue(Bytes.toBytes(".META.,,1"), now);
KeyValue a = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,99999999999999"), now);
KeyValue b = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now);
KVComparator c = new KeyValue.RootComparator();
assertTrue(c.compare(b, a) < 0);
KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now);
KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"),
KeyValue aa = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now);
KeyValue bb = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"),
Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L,
(byte[])null);
assertTrue(c.compare(aa, bb) < 0);
@ -213,32 +217,46 @@ public class TestKeyValue extends TestCase {
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0);
rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"),
rowA = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",testtable,www.hbase.org/,1234,4321"),
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"),
rowB = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",testtable,www.hbase.org/%20,99999,99999"),
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null);
assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0);
}
private void metacomparisons(final KeyValue.MetaComparator c) {
long now = System.currentTimeMillis();
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now),
new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) == 0);
KeyValue a = new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now);
KeyValue b = new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now);
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now),
new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) == 0);
KeyValue a = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now);
KeyValue b = new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,2"), now);
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now),
new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) > 0);
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,2"), now),
new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) > 0);
}
private void comparisons(final KeyValue.KVComparator c) {
long now = System.currentTimeMillis();
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now),
new KeyValue(Bytes.toBytes(".META.,,1"), now)) == 0);
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now),
new KeyValue(Bytes.toBytes(".META.,,2"), now)) < 0);
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,2"), now),
new KeyValue(Bytes.toBytes(".META.,,1"), now)) > 0);
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now)) == 0);
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,2"), now)) < 0);
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,2"), now),
new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now)) > 0);
}
public void testBinaryKeys() throws Exception {
@ -280,12 +298,12 @@ public class TestKeyValue extends TestCase {
}
// Make up -ROOT- table keys.
KeyValue [] rootKeys = {
new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0000\u0000,0,2"), fam, qf, 2, nb),
new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0001,0,3"), fam, qf, 3, nb),
new KeyValue(Bytes.toBytes(".META.,aaaaa,,0,1"), fam, qf, 1, nb),
new KeyValue(Bytes.toBytes(".META.,aaaaa,\u1000,0,5"), fam, qf, 5, nb),
new KeyValue(Bytes.toBytes(".META.,aaaaa,a,0,4"), fam, qf, 4, nb),
new KeyValue(Bytes.toBytes(".META.,,0"), fam, qf, 0, nb),
new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,\u0000\u0000,0,2"), fam, qf, 2, nb),
new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,\u0001,0,3"), fam, qf, 3, nb),
new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,,0,1"), fam, qf, 1, nb),
new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,\u1000,0,5"), fam, qf, 5, nb),
new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,a,0,4"), fam, qf, 4, nb),
new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,0"), fam, qf, 0, nb),
};
// This will output the keys incorrectly.
set = new TreeSet<KeyValue>(new KeyValue.MetaComparator());
@ -561,4 +579,4 @@ public class TestKeyValue extends TestCase {
assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0);
assertTrue(keyComparator.compare(newKey, kv2.getKey()) == 0);
}
}
}

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -216,7 +217,7 @@ public class TestBulkDeleteProtocol {
// @Ignore @Test
public void testBulkDeleteFamily() throws Throwable {
byte[] tableName = Bytes.toBytes("testBulkDeleteFamily");
HTableDescriptor htd = new HTableDescriptor(tableName);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
htd.addFamily(new HColumnDescriptor(FAMILY1));
htd.addFamily(new HColumnDescriptor(FAMILY2));
TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
@ -425,7 +426,7 @@ public class TestBulkDeleteProtocol {
}
private HTable createTable(byte[] tableName) throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY1);
hcd.setMaxVersions(10);// Just setting 10 as I am not testing with more than 10 versions here
htd.addFamily(hcd);

View File

@ -23,12 +23,12 @@ import static org.junit.Assert.assertEquals;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -38,10 +38,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.ZooKeeper;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Ignore;
import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
@ -70,7 +66,8 @@ public class TestZooKeeperScanPolicyObserver {
// @Ignore @Test
public void testScanPolicyObserver() throws Exception {
byte[] tableName = Bytes.toBytes("testScanPolicyObserver");
TableName tableName =
TableName.valueOf("testScanPolicyObserver");
HTableDescriptor desc = new HTableDescriptor(tableName);
HColumnDescriptor hcd = new HColumnDescriptor(F)
.setMaxVersions(10)

View File

@ -55,7 +55,8 @@ import org.junit.experimental.categories.Category;
*/
@Category(IntegrationTests.class)
public class IntegrationTestLazyCfLoading {
private static final String TABLE_NAME = IntegrationTestLazyCfLoading.class.getSimpleName();
private static final TableName TABLE_NAME =
TableName.valueOf(IntegrationTestLazyCfLoading.class.getSimpleName());
private static final String TIMEOUT_KEY = "hbase.%s.timeout";
private static final String ENCODING_KEY = "hbase.%s.datablock.encoding";
@ -183,7 +184,7 @@ public class IntegrationTestLazyCfLoading {
Configuration conf = util.getConfiguration();
String encodingKey = String.format(ENCODING_KEY, this.getClass().getSimpleName());
DataBlockEncoding blockEncoding = DataBlockEncoding.valueOf(conf.get(encodingKey, "FAST_DIFF"));
HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes(TABLE_NAME));
HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
for (byte[] cf : dataGen.getColumnFamilies()) {
HColumnDescriptor hcd = new HColumnDescriptor(cf);
hcd.setDataBlockEncoding(blockEncoding);
@ -221,12 +222,12 @@ public class IntegrationTestLazyCfLoading {
long maxRuntime = conf.getLong(timeoutKey, DEFAULT_TIMEOUT_MINUTES);
long serverCount = util.getHBaseClusterInterface().getClusterStatus().getServersSize();
long keysToWrite = serverCount * KEYS_TO_WRITE_PER_SERVER;
HTable table = new HTable(conf, Bytes.toBytes(TABLE_NAME));
HTable table = new HTable(conf, TABLE_NAME);
// Create multi-threaded writer and start it. We write multiple columns/CFs and verify
// their integrity, therefore multi-put is necessary.
MultiThreadedWriter writer =
new MultiThreadedWriter(dataGen, conf, Bytes.toBytes(TABLE_NAME));
new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
writer.setMultiPut(true);
LOG.info("Starting writer; the number of keys to write is " + keysToWrite);

View File

@ -137,7 +137,7 @@ public class IntegrationTestManyRegions {
@Override
public void run() {
long startTime, endTime;
HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
byte[][] splits = algo.split(REGION_COUNT);

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -107,10 +108,8 @@ public class IntegrationTestMTTR {
/**
* Configurable table names.
*/
private static String tableName;
private static byte[] tableNameBytes;
private static String loadTableName;
private static byte[] loadTableNameBytes;
private static TableName tableName;
private static TableName loadTableName;
/**
* Util to get at the cluster.
@ -164,13 +163,13 @@ public class IntegrationTestMTTR {
private static void setupActions() throws IOException {
// Set up the action that will restart a region server holding a region from our table
// because this table should only have one region we should be good.
restartRSAction = new ChaosMonkey.RestartRsHoldingTable(SLEEP_TIME, tableName);
restartRSAction = new ChaosMonkey.RestartRsHoldingTable(SLEEP_TIME, tableName.getNameAsString());
// Set up the action that will kill the region holding meta.
restartMetaAction = new ChaosMonkey.RestartRsHoldingMeta(SLEEP_TIME);
// Set up the action that will move the regions of our table.
moveRegionAction = new ChaosMonkey.MoveRegionsOfTable(SLEEP_TIME, tableName);
moveRegionAction = new ChaosMonkey.MoveRegionsOfTable(SLEEP_TIME, tableName.getNameAsString());
// Kill the master
restartMasterAction = new ChaosMonkey.RestartActiveMaster(1000);
@ -185,24 +184,22 @@ public class IntegrationTestMTTR {
private static void setupTables() throws IOException {
// Get the table name.
tableName = util.getConfiguration()
.get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR");
tableNameBytes = Bytes.toBytes(tableName);
tableName = TableName.valueOf(util.getConfiguration()
.get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));
loadTableName = util.getConfiguration()
.get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool");
loadTableNameBytes = Bytes.toBytes(loadTableName);
loadTableName = TableName.valueOf(util.getConfiguration()
.get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"));
if (util.getHBaseAdmin().tableExists(tableNameBytes)) {
util.deleteTable(tableNameBytes);
if (util.getHBaseAdmin().tableExists(tableName)) {
util.deleteTable(tableName);
}
if (util.getHBaseAdmin().tableExists(loadTableNameBytes)) {
util.deleteTable(loadTableNameBytes);
if (util.getHBaseAdmin().tableExists(loadTableName)) {
util.deleteTable(loadTableName);
}
// Create the table. If this fails then fail everything.
HTableDescriptor tableDescriptor = new HTableDescriptor(tableNameBytes);
HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
// Make the max file size huge so that splits don't happen during the test.
tableDescriptor.setMaxFileSize(Long.MAX_VALUE);
@ -213,7 +210,7 @@ public class IntegrationTestMTTR {
util.getHBaseAdmin().createTable(tableDescriptor);
// Setup the table for LoadTestTool
int ret = loadTool.run(new String[]{"-tn", loadTableName, "-init_only"});
int ret = loadTool.run(new String[]{"-tn", loadTableName.getNameAsString(), "-init_only"});
assertEquals("Failed to initialize LoadTestTool", 0, ret);
}
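The pattern above, reading a name from configuration and wrapping it in TableName immediately, replaces the parallel String/byte[] fields. A minimal sketch of the same pattern; the property name and default are copied from the hunk above, the class name is illustrative:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
public class ConfiguredTableName {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to the default when the property is unset.
    TableName tableName = TableName.valueOf(
        conf.get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));
    System.out.println(tableName.getNameAsString()); // IntegrationTestMTTR
  }
}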
@ -400,7 +397,7 @@ public class IntegrationTestMTTR {
public PutCallable(Future f) throws IOException {
super(f);
this.table = new HTable(util.getConfiguration(), tableNameBytes);
this.table = new HTable(util.getConfiguration(), tableName);
}
@Override
@ -427,7 +424,7 @@ public class IntegrationTestMTTR {
public ScanCallable(Future f) throws IOException {
super(f);
this.table = new HTable(util.getConfiguration(), tableNameBytes);
this.table = new HTable(util.getConfiguration(), tableName);
}
@Override
@ -517,7 +514,7 @@ public class IntegrationTestMTTR {
// But always go in just in case some action completes quickly
do {
int ret = loadTool.run(new String[]{
"-tn", loadTableName,
"-tn", loadTableName.getNameAsString(),
"-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads),
"-num_keys", String.valueOf(numKeys),
"-skip_init"

View File

@ -37,6 +37,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
@ -430,7 +431,7 @@ public class IntegrationTestBigLinkedList extends Configured implements Tool {
protected void createSchema() throws IOException {
HBaseAdmin admin = new HBaseAdmin(getConf());
byte[] tableName = getTableName(getConf());
TableName tableName = getTableName(getConf());
if (!admin.tableExists(tableName)) {
HTableDescriptor htd = new HTableDescriptor(getTableName(getConf()));
htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
@ -630,7 +631,7 @@ public class IntegrationTestBigLinkedList extends Configured implements Tool {
scan.setCaching(10000);
scan.setCacheBlocks(false);
TableMapReduceUtil.initTableMapperJob(getTableName(getConf()), scan,
TableMapReduceUtil.initTableMapperJob(getTableName(getConf()).getName(), scan,
VerifyMapper.class, BytesWritable.class, BytesWritable.class, job);
job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
@ -942,8 +943,8 @@ public class IntegrationTestBigLinkedList extends Configured implements Tool {
}
}
static byte[] getTableName(Configuration conf) {
return Bytes.toBytes(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
static TableName getTableName(Configuration conf) {
return TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
}
private static CINode getCINode(Result result, CINode node) {

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@ -295,10 +296,10 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool {
Path outputDir = getTestDir(TEST_NAME, "load-output");
NMapInputFormat.setNumMapTasks(conf, conf.getInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT));
conf.set(TABLE_NAME_KEY, htd.getNameAsString());
conf.set(TABLE_NAME_KEY, htd.getTableName().getNameAsString());
Job job = new Job(conf);
job.setJobName(TEST_NAME + " Load for " + htd.getNameAsString());
job.setJobName(TEST_NAME + " Load for " + htd.getTableName());
job.setJarByClass(this.getClass());
job.setMapperClass(LoadMapper.class);
job.setInputFormatClass(NMapInputFormat.class);
@ -317,12 +318,12 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool {
Job job = new Job(conf);
job.setJarByClass(this.getClass());
job.setJobName(TEST_NAME + " Verification for " + htd.getNameAsString());
job.setJobName(TEST_NAME + " Verification for " + htd.getTableName());
Scan scan = new Scan();
TableMapReduceUtil.initTableMapperJob(
htd.getNameAsString(), scan, VerifyMapper.class,
htd.getTableName().getNameAsString(), scan, VerifyMapper.class,
BytesWritable.class, BytesWritable.class, job);
int scannerCaching = conf.getInt("verify.scannercaching", SCANNER_CACHING);
TableMapReduceUtil.setScannerCaching(job, scannerCaching);
@ -349,7 +350,7 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool {
@Test
public void testLoadAndVerify() throws Exception {
HTableDescriptor htd = new HTableDescriptor(TEST_NAME);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_NAME));
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
HBaseAdmin admin = getTestingUtil().getHBaseAdmin();
@ -367,20 +368,20 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool {
private void deleteTable(HBaseAdmin admin, HTableDescriptor htd)
throws IOException, InterruptedException {
// Use disableTestAsync because disable can take a long time to complete
System.out.print("Disabling table " + htd.getNameAsString() +" ");
admin.disableTableAsync(htd.getName());
System.out.print("Disabling table " + htd.getTableName() +" ");
admin.disableTableAsync(htd.getTableName());
long start = System.currentTimeMillis();
// NOTE tables can be both admin.isTableEnabled=false and
// isTableDisabled=false, when disabling must use isTableDisabled!
while (!admin.isTableDisabled(htd.getName())) {
while (!admin.isTableDisabled(htd.getTableName())) {
System.out.print(".");
Thread.sleep(1000);
}
long delta = System.currentTimeMillis() - start;
System.out.println(" " + delta +" ms");
System.out.println("Deleting table " + htd.getNameAsString() +" ");
admin.deleteTable(htd.getName());
System.out.println("Deleting table " + htd.getTableName() +" ");
admin.deleteTable(htd.getTableName());
}
public void usage() {
@ -424,7 +425,7 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool {
// create HTableDescriptor for specified table
String table = getConf().get(TABLE_NAME_KEY, TEST_NAME);
HTableDescriptor htd = new HTableDescriptor(table);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
HBaseAdmin admin = new HBaseAdmin(getConf());

View File

@ -16,9 +16,10 @@ public final class AccessControlProtos {
int getActionCount();
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Action getAction(int index);
// optional bytes table = 2;
boolean hasTable();
com.google.protobuf.ByteString getTable();
// optional .TableName tableName = 2;
boolean hasTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
// optional bytes family = 3;
boolean hasFamily();
@ -148,14 +149,17 @@ public final class AccessControlProtos {
return action_.get(index);
}
// optional bytes table = 2;
public static final int TABLE_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString table_;
public boolean hasTable() {
// optional .TableName tableName = 2;
public static final int TABLENAME_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTable() {
return table_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
return tableName_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
// optional bytes family = 3;
@ -180,7 +184,7 @@ public final class AccessControlProtos {
private void initFields() {
action_ = java.util.Collections.emptyList();
table_ = com.google.protobuf.ByteString.EMPTY;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
family_ = com.google.protobuf.ByteString.EMPTY;
qualifier_ = com.google.protobuf.ByteString.EMPTY;
}
@ -189,6 +193,12 @@ public final class AccessControlProtos {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasTableName()) {
if (!getTableName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@ -200,7 +210,7 @@ public final class AccessControlProtos {
output.writeEnum(1, action_.get(i).getNumber());
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(2, table_);
output.writeMessage(2, tableName_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(3, family_);
@ -228,7 +238,7 @@ public final class AccessControlProtos {
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, table_);
.computeMessageSize(2, tableName_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
@ -263,10 +273,10 @@ public final class AccessControlProtos {
boolean result = true;
result = result && getActionList()
.equals(other.getActionList());
result = result && (hasTable() == other.hasTable());
if (hasTable()) {
result = result && getTable()
.equals(other.getTable());
result = result && (hasTableName() == other.hasTableName());
if (hasTableName()) {
result = result && getTableName()
.equals(other.getTableName());
}
result = result && (hasFamily() == other.hasFamily());
if (hasFamily()) {
@ -291,9 +301,9 @@ public final class AccessControlProtos {
hash = (37 * hash) + ACTION_FIELD_NUMBER;
hash = (53 * hash) + hashEnumList(getActionList());
}
if (hasTable()) {
hash = (37 * hash) + TABLE_FIELD_NUMBER;
hash = (53 * hash) + getTable().hashCode();
if (hasTableName()) {
hash = (37 * hash) + TABLENAME_FIELD_NUMBER;
hash = (53 * hash) + getTableName().hashCode();
}
if (hasFamily()) {
hash = (37 * hash) + FAMILY_FIELD_NUMBER;
@ -411,6 +421,7 @@ public final class AccessControlProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNameFieldBuilder();
}
}
private static Builder create() {
@ -421,7 +432,11 @@ public final class AccessControlProtos {
super.clear();
action_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
table_ = com.google.protobuf.ByteString.EMPTY;
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
family_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
@ -473,7 +488,11 @@ public final class AccessControlProtos {
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.table_ = table_;
if (tableNameBuilder_ == null) {
result.tableName_ = tableName_;
} else {
result.tableName_ = tableNameBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
@ -508,8 +527,8 @@ public final class AccessControlProtos {
}
onChanged();
}
if (other.hasTable()) {
setTable(other.getTable());
if (other.hasTableName()) {
mergeTableName(other.getTableName());
}
if (other.hasFamily()) {
setFamily(other.getFamily());
@ -522,6 +541,12 @@ public final class AccessControlProtos {
}
public final boolean isInitialized() {
if (hasTableName()) {
if (!getTableName().isInitialized()) {
return false;
}
}
return true;
}
@ -574,8 +599,12 @@ public final class AccessControlProtos {
break;
}
case 18: {
bitField0_ |= 0x00000002;
table_ = input.readBytes();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
if (hasTableName()) {
subBuilder.mergeFrom(getTableName());
}
input.readMessage(subBuilder, extensionRegistry);
setTableName(subBuilder.buildPartial());
break;
}
case 26: {
@ -645,29 +674,95 @@ public final class AccessControlProtos {
return this;
}
// optional bytes table = 2;
private com.google.protobuf.ByteString table_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasTable() {
// optional .TableName tableName = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public com.google.protobuf.ByteString getTable() {
return table_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
return tableNameBuilder_.getMessage();
}
}
public Builder setTable(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
table_ = value;
onChanged();
public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
tableName_ = value;
onChanged();
} else {
tableNameBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder clearTable() {
public Builder setTableName(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
} else {
tableNameBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
tableName_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
onChanged();
} else {
tableNameBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
table_ = getDefaultInstance().getTable();
onChanged();
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getTableNameFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
return tableName_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
tableName_ = null;
}
return tableNameBuilder_;
}
// optional bytes family = 3;
private com.google.protobuf.ByteString family_ = com.google.protobuf.ByteString.EMPTY;
@ -809,6 +904,10 @@ public final class AccessControlProtos {
memoizedIsInitialized = 0;
return false;
}
if (!getPermission().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@ -1099,6 +1198,10 @@ public final class AccessControlProtos {
return false;
}
if (!getPermission().isInitialized()) {
return false;
}
return true;
}
@ -1401,6 +1504,12 @@ public final class AccessControlProtos {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getPermissionsCount(); i++) {
if (!getPermissions(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@ -1708,6 +1817,12 @@ public final class AccessControlProtos {
return false;
}
for (int i = 0; i < getPermissionsCount(); i++) {
if (!getPermissions(i).isInitialized()) {
return false;
}
}
return true;
}
@ -4073,9 +4188,10 @@ public final class AccessControlProtos {
public interface UserPermissionsRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional bytes table = 1;
boolean hasTable();
com.google.protobuf.ByteString getTable();
// optional .TableName tableName = 1;
boolean hasTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
}
public static final class UserPermissionsRequest extends
com.google.protobuf.GeneratedMessage
@ -4106,24 +4222,33 @@ public final class AccessControlProtos {
}
private int bitField0_;
// optional bytes table = 1;
public static final int TABLE_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString table_;
public boolean hasTable() {
// optional .TableName tableName = 1;
public static final int TABLENAME_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTable() {
return table_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
return tableName_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
private void initFields() {
table_ = com.google.protobuf.ByteString.EMPTY;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasTableName()) {
if (!getTableName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@ -4132,7 +4257,7 @@ public final class AccessControlProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, table_);
output.writeMessage(1, tableName_);
}
getUnknownFields().writeTo(output);
}
@ -4145,7 +4270,7 @@ public final class AccessControlProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, table_);
.computeMessageSize(1, tableName_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@ -4170,10 +4295,10 @@ public final class AccessControlProtos {
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest other = (org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest) obj;
boolean result = true;
result = result && (hasTable() == other.hasTable());
if (hasTable()) {
result = result && getTable()
.equals(other.getTable());
result = result && (hasTableName() == other.hasTableName());
if (hasTableName()) {
result = result && getTableName()
.equals(other.getTableName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
@ -4184,9 +4309,9 @@ public final class AccessControlProtos {
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasTable()) {
hash = (37 * hash) + TABLE_FIELD_NUMBER;
hash = (53 * hash) + getTable().hashCode();
if (hasTableName()) {
hash = (37 * hash) + TABLENAME_FIELD_NUMBER;
hash = (53 * hash) + getTableName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
@ -4296,6 +4421,7 @@ public final class AccessControlProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNameFieldBuilder();
}
}
private static Builder create() {
@ -4304,7 +4430,11 @@ public final class AccessControlProtos {
public Builder clear() {
super.clear();
table_ = com.google.protobuf.ByteString.EMPTY;
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@ -4347,7 +4477,11 @@ public final class AccessControlProtos {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.table_ = table_;
if (tableNameBuilder_ == null) {
result.tableName_ = tableName_;
} else {
result.tableName_ = tableNameBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@ -4364,14 +4498,20 @@ public final class AccessControlProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest.getDefaultInstance()) return this;
if (other.hasTable()) {
setTable(other.getTable());
if (other.hasTableName()) {
mergeTableName(other.getTableName());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasTableName()) {
if (!getTableName().isInitialized()) {
return false;
}
}
return true;
}
@ -4399,8 +4539,12 @@ public final class AccessControlProtos {
break;
}
case 10: {
bitField0_ |= 0x00000001;
table_ = input.readBytes();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
if (hasTableName()) {
subBuilder.mergeFrom(getTableName());
}
input.readMessage(subBuilder, extensionRegistry);
setTableName(subBuilder.buildPartial());
break;
}
}
@ -4409,29 +4553,95 @@ public final class AccessControlProtos {
private int bitField0_;
// optional bytes table = 1;
private com.google.protobuf.ByteString table_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasTable() {
// optional .TableName tableName = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTable() {
return table_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
return tableNameBuilder_.getMessage();
}
}
public Builder setTable(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
table_ = value;
onChanged();
public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
tableName_ = value;
onChanged();
} else {
tableNameBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearTable() {
public Builder setTableName(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
} else {
tableNameBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
tableName_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
onChanged();
} else {
tableNameBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
table_ = getDefaultInstance().getTable();
onChanged();
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
return tableName_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
tableName_ = null;
}
return tableNameBuilder_;
}
// @@protoc_insertion_point(builder_scope:UserPermissionsRequest)
}
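
With the field upgraded from raw bytes to an embedded message, callers construct the structured name first. A hedged sketch; the namespace/qualifier fields on HBaseProtos.TableName are assumed from the hbase.proto import this commit adds, and are not shown in this diff:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;

public class BuildUserPermissionsRequest {
  public static void main(String[] args) {
    TableName tn = TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))   // assumed field
        .setQualifier(ByteString.copyFromUtf8("mytable"))   // assumed field
        .build();

    // Old form: newBuilder().setTable(ByteString); the new request carries
    // the full TableName message instead.
    UserPermissionsRequest req = UserPermissionsRequest.newBuilder()
        .setTableName(tn)
        .build();
    System.out.println(req.hasTableName());               // true
  }
}
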
@ -5107,6 +5317,12 @@ public final class AccessControlProtos {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getPermissionCount(); i++) {
if (!getPermission(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@ -5383,6 +5599,12 @@ public final class AccessControlProtos {
}
public final boolean isInitialized() {
for (int i = 0; i < getPermissionCount(); i++) {
if (!getPermission(i).isInitialized()) {
return false;
}
}
return true;
}
@ -6403,33 +6625,34 @@ public final class AccessControlProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\023AccessControl.proto\"\242\001\n\nPermission\022\"\n\006" +
"action\030\001 \003(\0162\022.Permission.Action\022\r\n\005tabl" +
"e\030\002 \001(\014\022\016\n\006family\030\003 \001(\014\022\021\n\tqualifier\030\004 \001" +
"(\014\">\n\006Action\022\010\n\004READ\020\000\022\t\n\005WRITE\020\001\022\010\n\004EXE" +
"C\020\002\022\n\n\006CREATE\020\003\022\t\n\005ADMIN\020\004\"?\n\016UserPermis" +
"sion\022\014\n\004user\030\001 \002(\014\022\037\n\npermission\030\002 \002(\0132\013" +
".Permission\"\225\001\n\024UserTablePermissions\022:\n\013" +
"permissions\030\001 \003(\0132%.UserTablePermissions" +
".UserPermissions\032A\n\017UserPermissions\022\014\n\004u" +
"ser\030\001 \002(\014\022 \n\013permissions\030\002 \003(\0132\013.Permiss",
"ion\"3\n\014GrantRequest\022#\n\npermission\030\001 \002(\0132" +
"\017.UserPermission\"\017\n\rGrantResponse\"4\n\rRev" +
"okeRequest\022#\n\npermission\030\001 \002(\0132\017.UserPer" +
"mission\"\020\n\016RevokeResponse\"\'\n\026UserPermiss" +
"ionsRequest\022\r\n\005table\030\001 \001(\014\">\n\027UserPermis" +
"sionsResponse\022#\n\npermission\030\001 \003(\0132\017.User" +
"Permission\":\n\027CheckPermissionsRequest\022\037\n" +
"\npermission\030\001 \003(\0132\013.Permission\"\032\n\030CheckP" +
"ermissionsResponse2\373\001\n\024AccessControlServ" +
"ice\022&\n\005Grant\022\r.GrantRequest\032\016.GrantRespo",
"nse\022)\n\006Revoke\022\016.RevokeRequest\032\017.RevokeRe" +
"sponse\022G\n\022GetUserPermissions\022\027.UserPermi" +
"ssionsRequest\032\030.UserPermissionsResponse\022" +
"G\n\020CheckPermissions\022\030.CheckPermissionsRe" +
"quest\032\031.CheckPermissionsResponseBI\n*org." +
"apache.hadoop.hbase.protobuf.generatedB\023" +
"AccessControlProtosH\001\210\001\001\240\001\001"
"\n\023AccessControl.proto\032\013hbase.proto\"\262\001\n\nP" +
"ermission\022\"\n\006action\030\001 \003(\0162\022.Permission.A" +
"ction\022\035\n\ttableName\030\002 \001(\0132\n.TableName\022\016\n\006" +
"family\030\003 \001(\014\022\021\n\tqualifier\030\004 \001(\014\">\n\006Actio" +
"n\022\010\n\004READ\020\000\022\t\n\005WRITE\020\001\022\010\n\004EXEC\020\002\022\n\n\006CREA" +
"TE\020\003\022\t\n\005ADMIN\020\004\"?\n\016UserPermission\022\014\n\004use" +
"r\030\001 \002(\014\022\037\n\npermission\030\002 \002(\0132\013.Permission" +
"\"\225\001\n\024UserTablePermissions\022:\n\013permissions" +
"\030\001 \003(\0132%.UserTablePermissions.UserPermis" +
"sions\032A\n\017UserPermissions\022\014\n\004user\030\001 \002(\014\022 ",
"\n\013permissions\030\002 \003(\0132\013.Permission\"3\n\014Gran" +
"tRequest\022#\n\npermission\030\001 \002(\0132\017.UserPermi" +
"ssion\"\017\n\rGrantResponse\"4\n\rRevokeRequest\022" +
"#\n\npermission\030\001 \002(\0132\017.UserPermission\"\020\n\016" +
"RevokeResponse\"7\n\026UserPermissionsRequest" +
"\022\035\n\ttableName\030\001 \001(\0132\n.TableName\">\n\027UserP" +
"ermissionsResponse\022#\n\npermission\030\001 \003(\0132\017" +
".UserPermission\":\n\027CheckPermissionsReque" +
"st\022\037\n\npermission\030\001 \003(\0132\013.Permission\"\032\n\030C" +
"heckPermissionsResponse2\373\001\n\024AccessContro",
"lService\022&\n\005Grant\022\r.GrantRequest\032\016.Grant" +
"Response\022)\n\006Revoke\022\016.RevokeRequest\032\017.Rev" +
"okeResponse\022G\n\022GetUserPermissions\022\027.User" +
"PermissionsRequest\032\030.UserPermissionsResp" +
"onse\022G\n\020CheckPermissions\022\030.CheckPermissi" +
"onsRequest\032\031.CheckPermissionsResponseBI\n" +
"*org.apache.hadoop.hbase.protobuf.genera" +
"tedB\023AccessControlProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -6441,7 +6664,7 @@ public final class AccessControlProtos {
internal_static_Permission_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_Permission_descriptor,
new java.lang.String[] { "Action", "Table", "Family", "Qualifier", },
new java.lang.String[] { "Action", "TableName", "Family", "Qualifier", },
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.class,
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder.class);
internal_static_UserPermission_descriptor =
@ -6505,7 +6728,7 @@ public final class AccessControlProtos {
internal_static_UserPermissionsRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UserPermissionsRequest_descriptor,
new java.lang.String[] { "Table", },
new java.lang.String[] { "TableName", },
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest.class,
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest.Builder.class);
internal_static_UserPermissionsResponse_descriptor =
@ -6538,6 +6761,7 @@ public final class AccessControlProtos {
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
}, assigner);
}


@ -11,9 +11,10 @@ public final class MasterMonitorProtos {
public interface GetSchemaAlterStatusRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes table_name = 1;
// required .TableName table_name = 1;
boolean hasTableName();
com.google.protobuf.ByteString getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
}
public static final class GetSchemaAlterStatusRequest extends
com.google.protobuf.GeneratedMessage
@ -44,18 +45,21 @@ public final class MasterMonitorProtos {
}
private int bitField0_;
// required bytes table_name = 1;
// required .TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString tableName_;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTableName() {
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
return tableName_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
private void initFields() {
tableName_ = com.google.protobuf.ByteString.EMPTY;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@ -66,6 +70,10 @@ public final class MasterMonitorProtos {
memoizedIsInitialized = 0;
return false;
}
if (!getTableName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@ -74,7 +82,7 @@ public final class MasterMonitorProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, tableName_);
output.writeMessage(1, tableName_);
}
getUnknownFields().writeTo(output);
}
@ -87,7 +95,7 @@ public final class MasterMonitorProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, tableName_);
.computeMessageSize(1, tableName_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@ -238,6 +246,7 @@ public final class MasterMonitorProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNameFieldBuilder();
}
}
private static Builder create() {
@ -246,7 +255,11 @@ public final class MasterMonitorProtos {
public Builder clear() {
super.clear();
tableName_ = com.google.protobuf.ByteString.EMPTY;
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@ -289,7 +302,11 @@ public final class MasterMonitorProtos {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.tableName_ = tableName_;
if (tableNameBuilder_ == null) {
result.tableName_ = tableName_;
} else {
result.tableName_ = tableNameBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@ -307,7 +324,7 @@ public final class MasterMonitorProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest.getDefaultInstance()) return this;
if (other.hasTableName()) {
setTableName(other.getTableName());
mergeTableName(other.getTableName());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
@ -318,6 +335,10 @@ public final class MasterMonitorProtos {
return false;
}
if (!getTableName().isInitialized()) {
return false;
}
return true;
}
@ -345,8 +366,12 @@ public final class MasterMonitorProtos {
break;
}
case 10: {
bitField0_ |= 0x00000001;
tableName_ = input.readBytes();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
if (hasTableName()) {
subBuilder.mergeFrom(getTableName());
}
input.readMessage(subBuilder, extensionRegistry);
setTableName(subBuilder.buildPartial());
break;
}
}
@ -355,29 +380,95 @@ public final class MasterMonitorProtos {
private int bitField0_;
// required bytes table_name = 1;
private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
// required .TableName table_name = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTableName() {
return tableName_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
return tableNameBuilder_.getMessage();
}
}
public Builder setTableName(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
tableName_ = value;
onChanged();
public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
tableName_ = value;
onChanged();
} else {
tableNameBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setTableName(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
} else {
tableNameBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
tableName_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
onChanged();
} else {
tableNameBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
tableName_ = getDefaultInstance().getTableName();
onChanged();
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
return tableName_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
tableName_ = null;
}
return tableNameBuilder_;
}
// @@protoc_insertion_point(builder_scope:GetSchemaAlterStatusRequest)
}
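
A behavioral consequence of the message-typed field: mergeFrom() now routes through mergeTableName(), so two partially populated names combine field-by-field instead of the later bytes value simply replacing the earlier one. A sketch under the same assumed TableName layout; buildPartial() is used so the assumed required fields need not all be set:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;

public class MergeSemantics {
  public static void main(String[] args) {
    GetSchemaAlterStatusRequest a = GetSchemaAlterStatusRequest.newBuilder()
        .setTableName(TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("ns")).buildPartial())
        .buildPartial();                        // qualifier not set yet
    GetSchemaAlterStatusRequest b = GetSchemaAlterStatusRequest.newBuilder()
        .setTableName(TableName.newBuilder()
            .setQualifier(ByteString.copyFromUtf8("t")).buildPartial())
        .buildPartial();                        // namespace not set yet

    // mergeTableName() combines the nested messages: the result carries
    // both the namespace from 'a' and the qualifier from 'b'.
    GetSchemaAlterStatusRequest merged = a.toBuilder().mergeFrom(b).buildPartial();
    System.out.println(merged.getTableName().hasNamespace()); // true
    System.out.println(merged.getTableName().hasQualifier()); // true
  }
}
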
@ -830,10 +921,15 @@ public final class MasterMonitorProtos {
public interface GetTableDescriptorsRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated string table_names = 1;
java.util.List<String> getTableNamesList();
// repeated .TableName table_names = 1;
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>
getTableNamesList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index);
int getTableNamesCount();
String getTableNames(int index);
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNamesOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
int index);
}
public static final class GetTableDescriptorsRequest extends
com.google.protobuf.GeneratedMessage
@ -863,28 +959,41 @@ public final class MasterMonitorProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableDescriptorsRequest_fieldAccessorTable;
}
// repeated string table_names = 1;
// repeated .TableName table_names = 1;
public static final int TABLE_NAMES_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList tableNames_;
public java.util.List<String>
getTableNamesList() {
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableNames_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNamesList() {
return tableNames_;
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNamesOrBuilderList() {
return tableNames_;
}
public int getTableNamesCount() {
return tableNames_.size();
}
public String getTableNames(int index) {
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) {
return tableNames_.get(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
int index) {
return tableNames_.get(index);
}
private void initFields() {
tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
tableNames_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getTableNamesCount(); i++) {
if (!getTableNames(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
@ -893,7 +1002,7 @@ public final class MasterMonitorProtos {
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < tableNames_.size(); i++) {
output.writeBytes(1, tableNames_.getByteString(i));
output.writeMessage(1, tableNames_.get(i));
}
getUnknownFields().writeTo(output);
}
@ -904,14 +1013,9 @@ public final class MasterMonitorProtos {
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < tableNames_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(tableNames_.getByteString(i));
}
size += dataSize;
size += 1 * getTableNamesList().size();
for (int i = 0; i < tableNames_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, tableNames_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@ -1059,6 +1163,7 @@ public final class MasterMonitorProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNamesFieldBuilder();
}
}
private static Builder create() {
@ -1067,8 +1172,12 @@ public final class MasterMonitorProtos {
public Builder clear() {
super.clear();
tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
if (tableNamesBuilder_ == null) {
tableNames_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
tableNamesBuilder_.clear();
}
return this;
}
@ -1106,12 +1215,15 @@ public final class MasterMonitorProtos {
public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
tableNames_ = new com.google.protobuf.UnmodifiableLazyStringList(
tableNames_);
bitField0_ = (bitField0_ & ~0x00000001);
if (tableNamesBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
tableNames_ = java.util.Collections.unmodifiableList(tableNames_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.tableNames_ = tableNames_;
} else {
result.tableNames_ = tableNamesBuilder_.build();
}
result.tableNames_ = tableNames_;
onBuilt();
return result;
}
@ -1127,21 +1239,43 @@ public final class MasterMonitorProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance()) return this;
if (!other.tableNames_.isEmpty()) {
if (tableNames_.isEmpty()) {
tableNames_ = other.tableNames_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureTableNamesIsMutable();
tableNames_.addAll(other.tableNames_);
if (tableNamesBuilder_ == null) {
if (!other.tableNames_.isEmpty()) {
if (tableNames_.isEmpty()) {
tableNames_ = other.tableNames_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureTableNamesIsMutable();
tableNames_.addAll(other.tableNames_);
}
onChanged();
}
} else {
if (!other.tableNames_.isEmpty()) {
if (tableNamesBuilder_.isEmpty()) {
tableNamesBuilder_.dispose();
tableNamesBuilder_ = null;
tableNames_ = other.tableNames_;
bitField0_ = (bitField0_ & ~0x00000001);
tableNamesBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTableNamesFieldBuilder() : null;
} else {
tableNamesBuilder_.addAllMessages(other.tableNames_);
}
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getTableNamesCount(); i++) {
if (!getTableNames(i).isInitialized()) {
return false;
}
}
return true;
}
@ -1169,8 +1303,9 @@ public final class MasterMonitorProtos {
break;
}
case 10: {
ensureTableNamesIsMutable();
tableNames_.add(input.readBytes());
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
input.readMessage(subBuilder, extensionRegistry);
addTableNames(subBuilder.buildPartial());
break;
}
}
@ -1179,60 +1314,190 @@ public final class MasterMonitorProtos {
private int bitField0_;
// repeated string table_names = 1;
private com.google.protobuf.LazyStringList tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
// repeated .TableName table_names = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableNames_ =
java.util.Collections.emptyList();
private void ensureTableNamesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
tableNames_ = new com.google.protobuf.LazyStringArrayList(tableNames_);
tableNames_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableNames_);
bitField0_ |= 0x00000001;
}
}
public java.util.List<String>
getTableNamesList() {
return java.util.Collections.unmodifiableList(tableNames_);
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNamesBuilder_;
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNamesList() {
if (tableNamesBuilder_ == null) {
return java.util.Collections.unmodifiableList(tableNames_);
} else {
return tableNamesBuilder_.getMessageList();
}
}
public int getTableNamesCount() {
return tableNames_.size();
if (tableNamesBuilder_ == null) {
return tableNames_.size();
} else {
return tableNamesBuilder_.getCount();
}
}
public String getTableNames(int index) {
return tableNames_.get(index);
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) {
if (tableNamesBuilder_ == null) {
return tableNames_.get(index);
} else {
return tableNamesBuilder_.getMessage(index);
}
}
public Builder setTableNames(
int index, String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTableNamesIsMutable();
tableNames_.set(index, value);
onChanged();
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNamesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTableNamesIsMutable();
tableNames_.set(index, value);
onChanged();
} else {
tableNamesBuilder_.setMessage(index, value);
}
return this;
}
public Builder addTableNames(String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTableNamesIsMutable();
tableNames_.add(value);
onChanged();
public Builder setTableNames(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNamesBuilder_ == null) {
ensureTableNamesIsMutable();
tableNames_.set(index, builderForValue.build());
onChanged();
} else {
tableNamesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
public Builder addTableNames(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNamesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTableNamesIsMutable();
tableNames_.add(value);
onChanged();
} else {
tableNamesBuilder_.addMessage(value);
}
return this;
}
public Builder addTableNames(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNamesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTableNamesIsMutable();
tableNames_.add(index, value);
onChanged();
} else {
tableNamesBuilder_.addMessage(index, value);
}
return this;
}
public Builder addTableNames(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNamesBuilder_ == null) {
ensureTableNamesIsMutable();
tableNames_.add(builderForValue.build());
onChanged();
} else {
tableNamesBuilder_.addMessage(builderForValue.build());
}
return this;
}
public Builder addTableNames(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNamesBuilder_ == null) {
ensureTableNamesIsMutable();
tableNames_.add(index, builderForValue.build());
onChanged();
} else {
tableNamesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
public Builder addAllTableNames(
java.lang.Iterable<String> values) {
ensureTableNamesIsMutable();
super.addAll(values, tableNames_);
onChanged();
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
if (tableNamesBuilder_ == null) {
ensureTableNamesIsMutable();
super.addAll(values, tableNames_);
onChanged();
} else {
tableNamesBuilder_.addAllMessages(values);
}
return this;
}
public Builder clearTableNames() {
tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
if (tableNamesBuilder_ == null) {
tableNames_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
tableNamesBuilder_.clear();
}
return this;
}
void addTableNames(com.google.protobuf.ByteString value) {
ensureTableNamesIsMutable();
tableNames_.add(value);
onChanged();
public Builder removeTableNames(int index) {
if (tableNamesBuilder_ == null) {
ensureTableNamesIsMutable();
tableNames_.remove(index);
onChanged();
} else {
tableNamesBuilder_.remove(index);
}
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNamesBuilder(
int index) {
return getTableNamesFieldBuilder().getBuilder(index);
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
int index) {
if (tableNamesBuilder_ == null) {
return tableNames_.get(index); } else {
return tableNamesBuilder_.getMessageOrBuilder(index);
}
}
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNamesOrBuilderList() {
if (tableNamesBuilder_ != null) {
return tableNamesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(tableNames_);
}
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder() {
return getTableNamesFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder(
int index) {
return getTableNamesFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
}
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder>
getTableNamesBuilderList() {
return getTableNamesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNamesFieldBuilder() {
if (tableNamesBuilder_ == null) {
tableNamesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
tableNames_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
tableNames_ = null;
}
return tableNamesBuilder_;
}
// @@protoc_insertion_point(builder_scope:GetTableDescriptorsRequest)
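
The repeated string list becomes a repeated message field, which is why the builder above grows the full RepeatedFieldBuilder surface (indexed set/add, builder-based add, remove). Typical construction, again assuming the TableName field layout:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;

public class BuildGetTableDescriptorsRequest {
  public static void main(String[] args) {
    GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder();
    // Previously: builder.addTableNames("t1"); each entry is now a message.
    for (String qualifier : new String[] {"t1", "t2"}) {
      builder.addTableNames(TableName.newBuilder()
          .setNamespace(ByteString.copyFromUtf8("default"))  // assumed field
          .setQualifier(ByteString.copyFromUtf8(qualifier))  // assumed field
          .build());
    }
    GetTableDescriptorsRequest req = builder.build();
    System.out.println(req.getTableNamesCount());            // 2
  }
}
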
@ -3068,26 +3333,27 @@ public final class MasterMonitorProtos {
static {
java.lang.String[] descriptorData = {
"\n\023MasterMonitor.proto\032\014Master.proto\032\013hba" +
"se.proto\032\023ClusterStatus.proto\"1\n\033GetSche" +
"maAlterStatusRequest\022\022\n\ntable_name\030\001 \002(\014" +
"\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_" +
"to_update_regions\030\001 \001(\r\022\025\n\rtotal_regions" +
"\030\002 \001(\r\"1\n\032GetTableDescriptorsRequest\022\023\n\013" +
"table_names\030\001 \003(\t\"A\n\033GetTableDescriptors" +
"Response\022\"\n\014table_schema\030\001 \003(\0132\014.TableSc" +
"hema\"\031\n\027GetClusterStatusRequest\"B\n\030GetCl" +
"usterStatusResponse\022&\n\016cluster_status\030\001 ",
"\002(\0132\016.ClusterStatus2\314\002\n\024MasterMonitorSer" +
"vice\022S\n\024GetSchemaAlterStatus\022\034.GetSchema" +
"AlterStatusRequest\032\035.GetSchemaAlterStatu" +
"sResponse\022P\n\023GetTableDescriptors\022\033.GetTa" +
"bleDescriptorsRequest\032\034.GetTableDescript" +
"orsResponse\022G\n\020GetClusterStatus\022\030.GetClu" +
"sterStatusRequest\032\031.GetClusterStatusResp" +
"onse\022D\n\017IsMasterRunning\022\027.IsMasterRunnin" +
"gRequest\032\030.IsMasterRunningResponseBI\n*or" +
"g.apache.hadoop.hbase.protobuf.generated",
"B\023MasterMonitorProtosH\001\210\001\001\240\001\001"
"se.proto\032\023ClusterStatus.proto\"=\n\033GetSche" +
"maAlterStatusRequest\022\036\n\ntable_name\030\001 \002(\013" +
"2\n.TableName\"T\n\034GetSchemaAlterStatusResp" +
"onse\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rt" +
"otal_regions\030\002 \001(\r\"=\n\032GetTableDescriptor" +
"sRequest\022\037\n\013table_names\030\001 \003(\0132\n.TableNam" +
"e\"A\n\033GetTableDescriptorsResponse\022\"\n\014tabl" +
"e_schema\030\001 \003(\0132\014.TableSchema\"\031\n\027GetClust" +
"erStatusRequest\"B\n\030GetClusterStatusRespo",
"nse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterSta" +
"tus2\314\002\n\024MasterMonitorService\022S\n\024GetSchem" +
"aAlterStatus\022\034.GetSchemaAlterStatusReque" +
"st\032\035.GetSchemaAlterStatusResponse\022P\n\023Get" +
"TableDescriptors\022\033.GetTableDescriptorsRe" +
"quest\032\034.GetTableDescriptorsResponse\022G\n\020G" +
"etClusterStatus\022\030.GetClusterStatusReques" +
"t\032\031.GetClusterStatusResponse\022D\n\017IsMaster" +
"Running\022\027.IsMasterRunningRequest\032\030.IsMas" +
"terRunningResponseBI\n*org.apache.hadoop.",
"hbase.protobuf.generatedB\023MasterMonitorP" +
"rotosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {


@ -1972,9 +1972,10 @@ public final class SecureBulkLoadProtos {
public interface PrepareBulkLoadRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes table_name = 1;
// required .TableName table_name = 1;
boolean hasTableName();
com.google.protobuf.ByteString getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
}
public static final class PrepareBulkLoadRequest extends
com.google.protobuf.GeneratedMessage
@ -2005,18 +2006,21 @@ public final class SecureBulkLoadProtos {
}
private int bitField0_;
// required bytes table_name = 1;
// required .TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString tableName_;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTableName() {
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
return tableName_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
private void initFields() {
tableName_ = com.google.protobuf.ByteString.EMPTY;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@ -2027,6 +2031,10 @@ public final class SecureBulkLoadProtos {
memoizedIsInitialized = 0;
return false;
}
if (!getTableName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
@ -2035,7 +2043,7 @@ public final class SecureBulkLoadProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, tableName_);
output.writeMessage(1, tableName_);
}
getUnknownFields().writeTo(output);
}
@ -2048,7 +2056,7 @@ public final class SecureBulkLoadProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, tableName_);
.computeMessageSize(1, tableName_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@ -2199,6 +2207,7 @@ public final class SecureBulkLoadProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNameFieldBuilder();
}
}
private static Builder create() {
@ -2207,7 +2216,11 @@ public final class SecureBulkLoadProtos {
public Builder clear() {
super.clear();
tableName_ = com.google.protobuf.ByteString.EMPTY;
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@ -2250,7 +2263,11 @@ public final class SecureBulkLoadProtos {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.tableName_ = tableName_;
if (tableNameBuilder_ == null) {
result.tableName_ = tableName_;
} else {
result.tableName_ = tableNameBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@ -2268,7 +2285,7 @@ public final class SecureBulkLoadProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest.getDefaultInstance()) return this;
if (other.hasTableName()) {
setTableName(other.getTableName());
mergeTableName(other.getTableName());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
@ -2279,6 +2296,10 @@ public final class SecureBulkLoadProtos {
return false;
}
if (!getTableName().isInitialized()) {
return false;
}
return true;
}
@ -2306,8 +2327,12 @@ public final class SecureBulkLoadProtos {
break;
}
case 10: {
bitField0_ |= 0x00000001;
tableName_ = input.readBytes();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
if (hasTableName()) {
subBuilder.mergeFrom(getTableName());
}
input.readMessage(subBuilder, extensionRegistry);
setTableName(subBuilder.buildPartial());
break;
}
}
@ -2316,29 +2341,95 @@ public final class SecureBulkLoadProtos {
private int bitField0_;
// required bytes table_name = 1;
private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
// required .TableName table_name = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTableName() {
return tableName_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
return tableNameBuilder_.getMessage();
}
}
public Builder setTableName(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
tableName_ = value;
onChanged();
public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
tableName_ = value;
onChanged();
} else {
tableNameBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setTableName(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
} else {
tableNameBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
tableName_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
onChanged();
} else {
tableNameBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
tableName_ = getDefaultInstance().getTableName();
onChanged();
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
return tableName_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
tableName_ = null;
}
return tableNameBuilder_;
}
// @@protoc_insertion_point(builder_scope:PrepareBulkLoadRequest)
}
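
Note that bytes and embedded messages share the length-delimited wire type (2), which is why the parser above still dispatches on case 10; only the write call changes from writeBytes to writeMessage, and old raw-bytes payloads are not interchangeable with the new TableName encoding. A round-trip sketch under the assumed TableName layout:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest;

public class BuildPrepareBulkLoadRequest {
  public static void main(String[] args) throws Exception {
    PrepareBulkLoadRequest req = PrepareBulkLoadRequest.newBuilder()
        .setTableName(TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))  // assumed field
            .setQualifier(ByteString.copyFromUtf8("bulk"))     // assumed field
            .build())
        .build();

    // Serialize and parse back through the new message encoding.
    byte[] wire = req.toByteArray();
    PrepareBulkLoadRequest parsed = PrepareBulkLoadRequest.parseFrom(wire);
    System.out.println(parsed.getTableName().getQualifier().toStringUtf8()); // bulk
  }
}
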
@ -3887,19 +3978,19 @@ public final class SecureBulkLoadProtos {
"FilesResponse\022\016\n\006loaded\030\001 \002(\010\"[\n\024Delegat" +
"ionTokenProto\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010pas" +
"sword\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001" +
"(\t\",\n\026PrepareBulkLoadRequest\022\022\n\ntable_na",
"me\030\001 \002(\014\"-\n\027PrepareBulkLoadResponse\022\022\n\nb" +
"ulk_token\030\001 \002(\t\",\n\026CleanupBulkLoadReques" +
"t\022\022\n\nbulk_token\030\001 \002(\t\"\031\n\027CleanupBulkLoad" +
"Response2\370\001\n\025SecureBulkLoadService\022D\n\017Pr" +
"epareBulkLoad\022\027.PrepareBulkLoadRequest\032\030" +
".PrepareBulkLoadResponse\022S\n\024SecureBulkLo" +
"adHFiles\022\034.SecureBulkLoadHFilesRequest\032\035" +
".SecureBulkLoadHFilesResponse\022D\n\017Cleanup" +
"BulkLoad\022\027.CleanupBulkLoadRequest\032\030.Clea" +
"nupBulkLoadResponseBJ\n*org.apache.hadoop",
".hbase.protobuf.generatedB\024SecureBulkLoa" +
"dProtosH\001\210\001\001\240\001\001"
"(\t\"8\n\026PrepareBulkLoadRequest\022\036\n\ntable_na",
"me\030\001 \002(\0132\n.TableName\"-\n\027PrepareBulkLoadR" +
"esponse\022\022\n\nbulk_token\030\001 \002(\t\",\n\026CleanupBu" +
"lkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t\"\031\n\027Cle" +
"anupBulkLoadResponse2\370\001\n\025SecureBulkLoadS" +
"ervice\022D\n\017PrepareBulkLoad\022\027.PrepareBulkL" +
"oadRequest\032\030.PrepareBulkLoadResponse\022S\n\024" +
"SecureBulkLoadHFiles\022\034.SecureBulkLoadHFi" +
"lesRequest\032\035.SecureBulkLoadHFilesRespons" +
"e\022D\n\017CleanupBulkLoad\022\027.CleanupBulkLoadRe" +
"quest\032\030.CleanupBulkLoadResponseBJ\n*org.a",
"pache.hadoop.hbase.protobuf.generatedB\024S" +
"ecureBulkLoadProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {


@ -5037,9 +5037,10 @@ public final class ZooKeeperProtos {
public interface TableLockOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional bytes table_name = 1;
// optional .TableName table_name = 1;
boolean hasTableName();
com.google.protobuf.ByteString getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
// optional .ServerName lock_owner = 2;
boolean hasLockOwner();
@ -5091,13 +5092,16 @@ public final class ZooKeeperProtos {
}
private int bitField0_;
// optional bytes table_name = 1;
// optional .TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString tableName_;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTableName() {
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
return tableName_;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
@ -5177,7 +5181,7 @@ public final class ZooKeeperProtos {
}
private void initFields() {
tableName_ = com.google.protobuf.ByteString.EMPTY;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
threadId_ = 0L;
isShared_ = false;
@ -5189,6 +5193,12 @@ public final class ZooKeeperProtos {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasTableName()) {
if (!getTableName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasLockOwner()) {
if (!getLockOwner().isInitialized()) {
memoizedIsInitialized = 0;
@ -5203,7 +5213,7 @@ public final class ZooKeeperProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, tableName_);
output.writeMessage(1, tableName_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, lockOwner_);
@ -5231,7 +5241,7 @@ public final class ZooKeeperProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, tableName_);
.computeMessageSize(1, tableName_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
@ -5447,6 +5457,7 @@ public final class ZooKeeperProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNameFieldBuilder();
getLockOwnerFieldBuilder();
}
}
@ -5456,7 +5467,11 @@ public final class ZooKeeperProtos {
public Builder clear() {
super.clear();
tableName_ = com.google.protobuf.ByteString.EMPTY;
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (lockOwnerBuilder_ == null) {
lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
@ -5513,7 +5528,11 @@ public final class ZooKeeperProtos {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.tableName_ = tableName_;
if (tableNameBuilder_ == null) {
result.tableName_ = tableName_;
} else {
result.tableName_ = tableNameBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
@ -5555,7 +5574,7 @@ public final class ZooKeeperProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this;
if (other.hasTableName()) {
setTableName(other.getTableName());
mergeTableName(other.getTableName());
}
if (other.hasLockOwner()) {
mergeLockOwner(other.getLockOwner());
@ -5577,6 +5596,12 @@ public final class ZooKeeperProtos {
}
public final boolean isInitialized() {
if (hasTableName()) {
if (!getTableName().isInitialized()) {
return false;
}
}
if (hasLockOwner()) {
if (!getLockOwner().isInitialized()) {
@ -5610,8 +5635,12 @@ public final class ZooKeeperProtos {
break;
}
case 10: {
bitField0_ |= 0x00000001;
tableName_ = input.readBytes();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
if (hasTableName()) {
subBuilder.mergeFrom(getTableName());
}
input.readMessage(subBuilder, extensionRegistry);
setTableName(subBuilder.buildPartial());
break;
}
case 18: {
@ -5649,29 +5678,95 @@ public final class ZooKeeperProtos {
private int bitField0_;
// optional bytes table_name = 1;
private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
// optional .TableName table_name = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getTableName() {
return tableName_;
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
if (tableNameBuilder_ == null) {
return tableName_;
} else {
return tableNameBuilder_.getMessage();
}
}
public Builder setTableName(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
tableName_ = value;
onChanged();
public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
tableName_ = value;
onChanged();
} else {
tableNameBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder setTableName(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
if (tableNameBuilder_ == null) {
tableName_ = builderForValue.build();
onChanged();
} else {
tableNameBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
if (tableNameBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
tableName_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
} else {
tableName_ = value;
}
onChanged();
} else {
tableNameBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
public Builder clearTableName() {
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
onChanged();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
tableName_ = getDefaultInstance().getTableName();
onChanged();
return this;
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getTableNameFieldBuilder().getBuilder();
}
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
if (tableNameBuilder_ != null) {
return tableNameBuilder_.getMessageOrBuilder();
} else {
return tableName_;
}
}
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
getTableNameFieldBuilder() {
if (tableNameBuilder_ == null) {
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
tableName_,
getParentForChildren(),
isClean());
tableName_ = null;
}
return tableNameBuilder_;
}
// optional .ServerName lock_owner = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
@ -7093,16 +7188,17 @@ public final class ZooKeeperProtos {
"licationState.State\"\"\n\005State\022\013\n\007ENABLED\020" +
"\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPositi" +
"on\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock\022",
"\022\n\nlock_owner\030\001 \002(\t\"\214\001\n\tTableLock\022\022\n\ntab" +
"le_name\030\001 \001(\014\022\037\n\nlock_owner\030\002 \001(\0132\013.Serv" +
"erName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004" +
" \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001" +
"(\003\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002" +
"(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSe" +
"quenceIds\022 \n\030last_flushed_sequence_id\030\001 " +
"\002(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSe" +
"quenceIdBE\n*org.apache.hadoop.hbase.prot" +
"obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
"\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\ntab" +
"le_name\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner\030" +
"\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n" +
"\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013cre" +
"ate_time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013fam" +
"ily_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026R" +
"egionStoreSequenceIds\022 \n\030last_flushed_se" +
"quence_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 \003" +
"(\0132\020.StoreSequenceIdBE\n*org.apache.hadoo" +
"p.hbase.protobuf.generatedB\017ZooKeeperPro",
"tosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {

View File

@ -22,6 +22,8 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "hbase.proto";
message Permission {
enum Action {
READ = 0;
@ -31,7 +33,7 @@ message Permission {
ADMIN = 4;
}
repeated Action action = 1;
optional bytes table = 2;
optional TableName tableName = 2;
optional bytes family = 3;
optional bytes qualifier = 4;
}
@ -70,7 +72,7 @@ message RevokeResponse {
message UserPermissionsRequest {
optional bytes table = 1;
optional TableName tableName = 1;
}
message UserPermissionsResponse {

View File

@ -32,7 +32,7 @@ import "Client.proto";
/* Column-level protobufs */
message AddColumnRequest {
required bytes table_name = 1;
required TableName table_name = 1;
required ColumnFamilySchema column_families = 2;
}
@ -40,7 +40,7 @@ message AddColumnResponse {
}
message DeleteColumnRequest {
required bytes table_name = 1;
required TableName table_name = 1;
required bytes column_name = 2;
}
@ -48,7 +48,7 @@ message DeleteColumnResponse {
}
message ModifyColumnRequest {
required bytes table_name = 1;
required TableName table_name = 1;
required ColumnFamilySchema column_families = 2;
}
@ -110,34 +110,81 @@ message CreateTableResponse {
}
message DeleteTableRequest {
required bytes table_name = 1;
required TableName table_name = 1;
}
message DeleteTableResponse {
}
message EnableTableRequest {
required bytes table_name = 1;
required TableName table_name = 1;
}
message EnableTableResponse {
}
message DisableTableRequest {
required bytes table_name = 1;
required TableName table_name = 1;
}
message DisableTableResponse {
}
message ModifyTableRequest {
required bytes table_name = 1;
required TableName table_name = 1;
required TableSchema table_schema = 2;
}
message ModifyTableResponse {
}
/* Namespace-level protobufs */
message CreateNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message CreateNamespaceResponse {
}
message DeleteNamespaceRequest {
required string namespaceName = 1;
}
message DeleteNamespaceResponse {
}
message ModifyNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message GetNamespaceDescriptorRequest {
required string namespaceName = 1;
}
message GetNamespaceDescriptorResponse {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message ModifyNamespaceResponse {
}
message ListNamespaceDescriptorsRequest {
}
message ListNamespaceDescriptorsResponse {
repeated NamespaceDescriptor namespaceDescriptor = 1;
}
message GetTableDescriptorsByNamespaceRequest {
required string namespaceName = 1;
}
message GetTableDescriptorsByNamespaceResponse {
repeated TableSchema tableSchema = 1;
}
/* Cluster-level protobufs */
@ -382,6 +429,31 @@ service MasterAdminService {
*/
rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse);
/** return true if master is available */
rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
/** Modify a namespace's metadata */
rpc ModifyNamespace(ModifyNamespaceRequest)
returns(ModifyNamespaceResponse);
/** Creates a new namespace synchronously */
rpc CreateNamespace(CreateNamespaceRequest)
returns(CreateNamespaceResponse);
/** Deletes a namespace synchronously */
rpc DeleteNamespace(DeleteNamespaceRequest)
returns(DeleteNamespaceResponse);
/** Get a namespace descriptor by name */
rpc GetNamespaceDescriptor(GetNamespaceDescriptorRequest)
returns(GetNamespaceDescriptorResponse);
/** returns a list of namespaces */
rpc ListNamespaceDescriptors(ListNamespaceDescriptorsRequest)
returns(ListNamespaceDescriptorsResponse);
/** returns a list of tables for a given namespace */
rpc GetTableDescriptorsByNamespace(GetTableDescriptorsByNamespaceRequest)
returns(GetTableDescriptorsByNamespaceResponse);
}
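Client-side, these RPCs surface through HBaseAdmin. A minimal sketch, assuming admin methods that mirror the RPC names above (getTableDescriptorsByNamespace is confirmed by the master-status template later in this patch; the other method names are assumptions):
// Sketch only: namespace lifecycle from a client.
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createNamespace(NamespaceDescriptor.create("ns1").build());  // CreateNamespace RPC
for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {  // ListNamespaceDescriptors RPC
  System.out.println(ns.getName());
}
HTableDescriptor[] tables = admin.getTableDescriptorsByNamespace("ns1");
admin.deleteNamespace("ns1");                                      // DeleteNamespace RPC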

View File

@ -29,7 +29,7 @@ import "hbase.proto";
import "ClusterStatus.proto";
message GetSchemaAlterStatusRequest {
required bytes table_name = 1;
required TableName table_name = 1;
}
message GetSchemaAlterStatusResponse {
@ -38,7 +38,7 @@ message GetSchemaAlterStatusResponse {
}
message GetTableDescriptorsRequest {
repeated string table_names = 1;
repeated TableName table_names = 1;
}
message GetTableDescriptorsResponse {

View File

@ -44,7 +44,7 @@ message DelegationTokenProto {
}
message PrepareBulkLoadRequest {
required bytes table_name = 1;
required TableName table_name = 1;
}
message PrepareBulkLoadResponse {

View File

@ -34,7 +34,7 @@ message WALKey {
required uint64 log_sequence_number = 3;
required uint64 write_time = 4;
optional UUID cluster_id = 5;
repeated FamilyScope scopes = 6;
optional uint32 following_kv_count = 7;
/*

View File

@ -144,7 +144,7 @@ message ReplicationLock {
* Metadata associated with a table lock in zookeeper
*/
message TableLock {
optional bytes table_name = 1;
optional TableName table_name = 1;
optional ServerName lock_owner = 2;
optional int64 thread_id = 3;
optional bool is_shared = 4;

View File

@ -25,12 +25,20 @@ option optimize_for = SPEED;
import "Cell.proto";
/**
* Table Name
*/
message TableName {
required bytes namespace = 1;
required bytes qualifier = 2;
}
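For orientation, a sketch of how this message maps onto the new org.apache.hadoop.hbase.TableName class used throughout this patch. The valueOf overloads appear in the HFileLink and RpcServer hunks below; that the single-string overload parses at NAMESPACE_DELIM (':') is an assumption here:
// Sketch only: two equivalent ways to build a TableName.
TableName t1 = TableName.valueOf("ns1", "mytable");   // namespace + qualifier
TableName t2 = TableName.valueOf("ns1:mytable");      // assumed to split at ':'
assert t1.equals(t2);
byte[] wire = t1.getName();                           // full byte[] form for legacy APIs
String readable = t1.getNameAsString();               // "ns1:mytable"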
/**
* Table Schema
* Inspired by the REST TableSchema
*/
message TableSchema {
optional bytes name = 1;
optional TableName table_name = 1;
repeated BytesBytesPair attributes = 2;
repeated ColumnFamilySchema column_families = 3;
repeated NameStringPair configuration = 4;
@ -51,7 +59,7 @@ message ColumnFamilySchema {
*/
message RegionInfo {
required uint64 region_id = 1;
required bytes table_name = 2;
required TableName table_name = 2;
optional bytes start_key = 3;
optional bytes end_key = 4;
optional bool offline = 5;
@ -172,3 +180,8 @@ message UUID {
required uint64 least_sig_bits = 1;
required uint64 most_sig_bits = 2;
}
message NamespaceDescriptor {
required bytes name = 1;
repeated NameStringPair configuration = 2;
}
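A hedged sketch of building the corresponding Java NamespaceDescriptor; the create(...) builder is assumed from the class imported in the master-status template below, and the configuration key is invented for illustration. ProtobufUtil.toProtoNamespaceDescriptor is shown in ZKNamespaceManager later in this diff:
// Sketch: a namespace descriptor with one (hypothetical) configuration property.
NamespaceDescriptor ns = NamespaceDescriptor.create("ns1")
    .addConfiguration("some.namespace.property", "value")  // hypothetical key
    .build();
byte[] wire = ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray();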

View File

@ -39,14 +39,17 @@ org.apache.hadoop.hbase.master.HMaster;
org.apache.hadoop.hbase.master.AssignmentManager;
org.apache.hadoop.hbase.master.ServerManager;
org.apache.hadoop.hbase.HConstants;
org.apache.hadoop.hbase.NamespaceDescriptor;
org.apache.hadoop.hbase.ServerLoad;
org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.client.HBaseAdmin;
org.apache.hadoop.hbase.client.HConnectionManager;
org.apache.hadoop.hbase.HTableDescriptor;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.TableName;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
org.apache.hadoop.hbase.master.DeadServer;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
</%import>
<%if format.equals("json") %>
<& ../common/TaskMonitorTmpl; filter = filter; format = "json" &>
@ -293,27 +296,39 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
</body>
</html>
<%def catalogTables>
<%java>
HTableDescriptor[] sysTables = admin.getTableDescriptorsByNamespace(NamespaceDescriptor
.SYSTEM_NAMESPACE_NAME_STR);
</%java>
<table class="table table-striped">
<tr>
<th>System Tables</th>
<th>Table Name</th>
<%if (frags != null) %>
<th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th>
</%if>
<th>Description</th>
</tr>
<%if (metaLocation != null) %>
<%for HTableDescriptor systemTable : sysTables%>
<tr>
<td><a href="table.jsp?name=<% Bytes.toString(HConstants.META_TABLE_NAME) %>"><% Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
<%java>TableName tableName = systemTable.getTableName();</%java>
<td><a href="table.jsp?name=<% tableName %>"><% tableName %></a></td>
<%if (frags != null)%>
<td align="center"><% frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
<td align="center"><% frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString())
.intValue() + "%" : "n/a" %></td>
</%if>
<td>The .META. table holds references to all User Table regions</td>
<%java>String description = null;
if (tableName.equals(TableName.META_TABLE_NAME)){
description = "The .META. table holds references to all User Table regions";
} else {
description = "The .NAMESPACE. table holds information about namespaces.";
}
</%java>
<td><% description %></td>
</tr>
</%if>
</%for>
</table>
</%def>
@ -333,11 +348,12 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
</tr>
<%for HTableDescriptor htDesc : tables%>
<tr>
<td><a href=table.jsp?name=<% htDesc.getNameAsString() %>><% htDesc.getNameAsString() %></a> </td>
<td><a href=table.jsp?name=<% htDesc.getTableName().getNameAsString() %>><% htDesc.getTableName().getNameAsString() %></a> </td>
<%if (frags != null) %>
<td align="center"><% frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
<td align="center"><% frags.get(htDesc.getTableName().getNameAsString()) != null ? frags.get(htDesc.getTableName().getNameAsString()).intValue() + "%" : "n/a" %></td>
</%if>
<td><% master.getAssignmentManager().getRegionStates().getRegionsOfTable(htDesc.getName()).size() %>
<td><% master.getAssignmentManager().getRegionStates().getRegionsOfTable(htDesc
.getTableName()).size() %>
<td><% htDesc.toStringCustomizedValues() %></td>
</tr>
</%for>
@ -358,9 +374,13 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
<th>Creation Time</th>
</tr>
<%for SnapshotDescription snapshotDesc : snapshots%>
<%java>
TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable());
</%java>
<tr>
<td><a href="snapshot.jsp?name=<% snapshotDesc.getName() %>"><% snapshotDesc.getName() %></a> </td>
<td><a href="table.jsp?name=<% snapshotDesc.getTable() %>"><% snapshotDesc.getTable() %></a></td>
<td><a href="table.jsp?name=<% snapshotTable.getNameAsString() %>"><% snapshotTable.getNameAsString() %></a>
</td>
<td><% new Date(snapshotDesc.getCreationTime()) %></td>
</tr>
</%for>

View File

@ -450,7 +450,7 @@ public class LocalHBaseCluster {
cluster.startup();
HBaseAdmin admin = new HBaseAdmin(conf);
HTableDescriptor htd =
new HTableDescriptor(Bytes.toBytes(cluster.getClass().getName()));
new HTableDescriptor(TableName.valueOf(cluster.getClass().getName()));
admin.createTable(htd);
cluster.shutdown();
}

View File

@ -31,19 +31,19 @@ import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Evolving
public interface TableDescriptors {
/**
* @param tablename
* @param tableName
* @return HTableDescriptor for tableName
* @throws IOException
*/
HTableDescriptor get(final String tablename)
HTableDescriptor get(final TableName tableName)
throws IOException;
/**
* @param tablename
* @return HTableDescriptor for tablename
* Get Map of all HTableDescriptors for a given namespace.
* @return Map of all descriptors.
* @throws IOException
*/
HTableDescriptor get(final byte[] tablename)
Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException;
/**
@ -68,6 +68,6 @@ public interface TableDescriptors {
* @return Instance of table descriptor or null if none found.
* @throws IOException
*/
HTableDescriptor remove(final String tablename)
HTableDescriptor remove(final TableName tablename)
throws IOException;
}

View File

@ -0,0 +1,205 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import java.io.IOException;
import java.util.List;
import java.util.NavigableMap;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListMap;
/**
* Class serves two purposes:
*
* 1. Broadcast NamespaceDescriptor information via ZK
* (Done by the Master)
* 2. Consume broadcasted NamespaceDescriptor changes
* (Done by the RegionServers)
*
*/
@InterfaceAudience.Private
public class ZKNamespaceManager extends ZooKeeperListener {
private static Log LOG = LogFactory.getLog(ZKNamespaceManager.class);
private final String nsZNode;
private volatile NavigableMap<String,NamespaceDescriptor> cache;
public ZKNamespaceManager(ZooKeeperWatcher zkw) throws IOException {
super(zkw);
nsZNode = ZooKeeperWatcher.namespaceZNode;
cache = new ConcurrentSkipListMap<String, NamespaceDescriptor>();
}
public void start() throws IOException {
watcher.registerListener(this);
try {
if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) {
List<ZKUtil.NodeAndData> existing =
ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
if (existing != null) {
refreshNodes(existing);
}
} else {
ZKUtil.createWithParents(watcher, nsZNode);
}
} catch (KeeperException e) {
throw new IOException("Failed to initialize ZKNamespaceManager", e);
}
}
public NamespaceDescriptor get(String name) {
return cache.get(name);
}
public void update(NamespaceDescriptor ns) throws IOException {
writeNamespace(ns);
cache.put(ns.getName(), ns);
}
public void remove(String name) throws IOException {
deleteNamespace(name);
cache.remove(name);
}
public NavigableSet<NamespaceDescriptor> list() throws IOException {
NavigableSet<NamespaceDescriptor> ret =
Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
for(NamespaceDescriptor ns: cache.values()) {
ret.add(ns);
}
return ret;
}
@Override
public void nodeCreated(String path) {
if (nsZNode.equals(path)) {
try {
List<ZKUtil.NodeAndData> nodes =
ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
refreshNodes(nodes);
} catch (KeeperException ke) {
String msg = "Error reading data from zookeeper";
LOG.error(msg, ke);
watcher.abort(msg, ke);
} catch (IOException e) {
String msg = "Error parsing data from zookeeper";
LOG.error(msg, e);
watcher.abort(msg, e);
}
}
}
@Override
public void nodeDeleted(String path) {
if (nsZNode.equals(ZKUtil.getParent(path))) {
String nsName = ZKUtil.getNodeName(path);
cache.remove(nsName);
}
}
@Override
public void nodeDataChanged(String path) {
if (nsZNode.equals(ZKUtil.getParent(path))) {
try {
byte[] data = ZKUtil.getDataAndWatch(watcher, path);
NamespaceDescriptor ns =
ProtobufUtil.toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor.parseFrom(data));
cache.put(ns.getName(), ns);
} catch (KeeperException ke) {
String msg = "Error reading data from zookeeper for node "+path;
LOG.error(msg, ke);
// only option is to abort
watcher.abort(msg, ke);
} catch (IOException ioe) {
String msg = "Error deserializing namespace: "+path;
LOG.error(msg, ioe);
watcher.abort(msg, ioe);
}
}
}
@Override
public void nodeChildrenChanged(String path) {
if (nsZNode.equals(path)) {
try {
List<ZKUtil.NodeAndData> nodes =
ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
refreshNodes(nodes);
} catch (KeeperException ke) {
LOG.error("Error reading data from zookeeper for path "+path, ke);
watcher.abort("Zookeeper error get node children for path "+path, ke);
} catch (IOException e) {
LOG.error("Error deserializing namespace child from: "+path, e);
watcher.abort("Error deserializing namespace child from: " + path, e);
}
}
}
private void deleteNamespace(String name) throws IOException {
String zNode = ZKUtil.joinZNode(nsZNode, name);
try {
ZKUtil.deleteNode(watcher, zNode);
} catch (KeeperException e) {
LOG.error("Failed updating permissions for namespace "+name, e);
throw new IOException("Failed updating permissions for namespace "+name, e);
}
}
private void writeNamespace(NamespaceDescriptor ns) throws IOException {
String zNode = ZKUtil.joinZNode(nsZNode, ns.getName());
try {
ZKUtil.createWithParents(watcher, zNode);
ZKUtil.updateExistingNodeData(watcher, zNode,
ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1);
} catch (KeeperException e) {
LOG.error("Failed updating permissions for namespace "+ns.getName(), e);
throw new IOException("Failed updating permissions for namespace "+ns.getName(), e);
}
}
private void refreshNodes(List<ZKUtil.NodeAndData> nodes) throws IOException {
for (ZKUtil.NodeAndData n : nodes) {
if (n.isEmpty()) continue;
String path = n.getNode();
String namespace = ZKUtil.getNodeName(path);
byte[] nodeData = n.getData();
if (LOG.isDebugEnabled()) {
LOG.debug("Updating namespace cache from node "+namespace+" with data: "+
Bytes.toStringBinary(nodeData));
}
NamespaceDescriptor ns =
ProtobufUtil.toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor.parseFrom(nodeData));
cache.put(ns.getName(), ns);
}
}
}
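A usage sketch of the flow the class comment describes: the master writes, region servers observe. The zkWatcher handle is assumed to come from the hosting server's ZooKeeperWatcher:
// Master side: publish a namespace so region servers can pick it up.
ZKNamespaceManager zkNsManager = new ZKNamespaceManager(zkWatcher);  // zkWatcher assumed available
zkNsManager.start();                       // registers the listener, primes the cache
zkNsManager.update(NamespaceDescriptor.create("ns1").build());
// Region server side: the same class consumes the broadcast; once the
// nodeCreated/nodeDataChanged callbacks fire, reads are served from the local cache.
NamespaceDescriptor ns = zkNsManager.get("ns1");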

View File

@ -31,8 +31,8 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
@ -73,7 +73,7 @@ public class HFileArchiver {
public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
throws IOException {
Path rootDir = FSUtils.getRootDir(conf);
archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTableName()),
HRegion.getRegionDir(rootDir, info));
}
@ -107,7 +107,9 @@ public class HFileArchiver {
// make sure the regiondir lives under the tabledir
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir);
Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
FSUtils.getTableName(tableDir),
regionDir.getName());
FileStatusConverter getAsFile = new FileStatusConverter(fs);
// otherwise, we attempt to archive the store files

View File

@ -75,7 +75,7 @@ public class ConstraintProcessor extends BaseRegionObserver {
if (LOG.isInfoEnabled()) {
LOG.info("Finished loading " + constraints.size()
+ " user Constraints on table: " + new String(desc.getName()));
+ " user Constraints on table: " + desc.getTableName());
}
}

View File

@ -21,10 +21,12 @@ package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@ -59,156 +61,180 @@ public class BaseMasterObserver implements MasterObserver {
@Override
public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException {
TableName tableName) throws IOException {
}
@Override
public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException {
TableName tableName) throws IOException {
}
@Override
public void preDeleteTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
final ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException{
}
@Override
public void postDeleteTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
final ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException {
}
@Override
public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HTableDescriptor htd) throws IOException {
TableName tableName, HTableDescriptor htd) throws IOException {
}
@Override
public void postModifyTableHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
HTableDescriptor htd) throws IOException {
}
@Override
public void preModifyTableHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
HTableDescriptor htd) throws IOException {
}
@Override
public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HTableDescriptor htd) throws IOException {
TableName tableName, HTableDescriptor htd) throws IOException {
}
@Override
public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, NamespaceDescriptor ns) throws IOException {
}
@Override
public void postCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, NamespaceDescriptor ns) throws IOException {
}
@Override
public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace) throws IOException {
}
@Override
public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace) throws IOException {
}
@Override
public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, NamespaceDescriptor ns) throws IOException {
}
@Override
public void postModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, NamespaceDescriptor ns) throws IOException {
}
@Override
public void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor column) throws IOException {
TableName tableName, HColumnDescriptor column) throws IOException {
}
@Override
public void postAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor column) throws IOException {
TableName tableName, HColumnDescriptor column) throws IOException {
}
@Override
public void preAddColumnHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
HColumnDescriptor column) throws IOException {
}
@Override
public void postAddColumnHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
HColumnDescriptor column) throws IOException {
}
@Override
public void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor descriptor) throws IOException {
TableName tableName, HColumnDescriptor descriptor) throws IOException {
}
@Override
public void postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor descriptor) throws IOException {
TableName tableName, HColumnDescriptor descriptor) throws IOException {
}
@Override
public void preModifyColumnHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
HColumnDescriptor descriptor) throws IOException {
}
@Override
public void postModifyColumnHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
HColumnDescriptor descriptor) throws IOException {
}
@Override
public void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, byte[] c) throws IOException {
TableName tableName, byte[] c) throws IOException {
}
@Override
public void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, byte[] c) throws IOException {
TableName tableName, byte[] c) throws IOException {
}
@Override
public void preDeleteColumnHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
byte[] c) throws IOException {
}
@Override
public void postDeleteColumnHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName,
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName,
byte[] c) throws IOException {
}
@Override
public void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException {
TableName tableName) throws IOException {
}
@Override
public void postEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException {
TableName tableName) throws IOException {
}
@Override
public void preEnableTableHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException {
}
@Override
public void postEnableTableHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException {
}
@Override
public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException {
TableName tableName) throws IOException {
}
@Override
public void postDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException {
TableName tableName) throws IOException {
}
@Override
public void preDisableTableHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException {
}
@Override
public void postDisableTableHandler(
ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException {
}
@ -346,7 +372,8 @@ public class BaseMasterObserver implements MasterObserver {
@Override
public void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<String> tableNamesList, List<HTableDescriptor> descriptors) throws IOException {
List<TableName> tableNamesList, List<HTableDescriptor> descriptors)
throws IOException {
}
@Override

View File

@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
@ -366,10 +367,10 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
*/
class HTableWrapper implements HTableInterface {
private byte[] tableName;
private TableName tableName;
private HTable table;
public HTableWrapper(byte[] tableName) throws IOException {
public HTableWrapper(TableName tableName) throws IOException {
this.tableName = tableName;
this.table = new HTable(conf, tableName);
openTables.add(this);
@ -481,8 +482,14 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
return table.getTableDescriptor();
}
@Override
public byte[] getTableName() {
return tableName;
return tableName.getName();
}
@Override
public TableName getName() {
return table.getName();
}
@Override
@ -667,7 +674,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
* @exception java.io.IOException Exception
*/
@Override
public HTableInterface getTable(byte[] tableName) throws IOException {
public HTableInterface getTable(TableName tableName) throws IOException {
return new HTableWrapper(tableName);
}
}
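From a coprocessor's point of view the change is mechanical; a sketch of the new call shape inside any observer (env, the table, family, and values are placeholders):
// Sketch: coprocessors now hand a TableName, not a byte[], to the environment.
HTableInterface table = env.getTable(TableName.valueOf("ns1", "audit"));
try {
  table.put(new Put(Bytes.toBytes("row1"))
      .add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
} finally {
  table.close();
}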

View File

@ -25,9 +25,11 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@ -96,7 +98,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void preDeleteTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException;
TableName tableName) throws IOException;
/**
* Called after the deleteTable operation has been requested. Called as part
@ -105,7 +107,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void postDeleteTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName) throws IOException;
TableName tableName) throws IOException;
/**
* Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a
@ -116,7 +118,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void preDeleteTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
final ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException;
/**
@ -128,7 +130,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void postDeleteTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx, byte[] tableName)
final ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tableName)
throws IOException;
/**
@ -140,7 +142,7 @@ public interface MasterObserver extends Coprocessor {
* @param htd the HTableDescriptor
*/
void preModifyTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName, HTableDescriptor htd) throws IOException;
final TableName tableName, HTableDescriptor htd) throws IOException;
/**
* Called after the modifyTable operation has been requested. Called as part
@ -150,7 +152,7 @@ public interface MasterObserver extends Coprocessor {
* @param htd the HTableDescriptor
*/
void postModifyTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName, HTableDescriptor htd) throws IOException;
final TableName tableName, HTableDescriptor htd) throws IOException;
/**
* Called prior to modifying a table's properties. Called as part of modify
@ -162,7 +164,7 @@ public interface MasterObserver extends Coprocessor {
*/
void preModifyTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName, HTableDescriptor htd) throws IOException;
final TableName tableName, HTableDescriptor htd) throws IOException;
/**
* Called after to modifying a table's properties. Called as part of modify
@ -174,7 +176,7 @@ public interface MasterObserver extends Coprocessor {
*/
void postModifyTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName, HTableDescriptor htd) throws IOException;
final TableName tableName, HTableDescriptor htd) throws IOException;
/**
* Called prior to adding a new column family to the table. Called as part of
@ -184,7 +186,7 @@ public interface MasterObserver extends Coprocessor {
* @param column the HColumnDescriptor
*/
void preAddColumn(final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor column) throws IOException;
TableName tableName, HColumnDescriptor column) throws IOException;
/**
* Called after the new column family has been created. Called as part of
@ -194,7 +196,7 @@ public interface MasterObserver extends Coprocessor {
* @param column the HColumnDescriptor
*/
void postAddColumn(final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor column) throws IOException;
TableName tableName, HColumnDescriptor column) throws IOException;
/**
* Called prior to adding a new column family to the table. Called as part of
@ -205,7 +207,7 @@ public interface MasterObserver extends Coprocessor {
*/
void preAddColumnHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor column) throws IOException;
TableName tableName, HColumnDescriptor column) throws IOException;
/**
* Called after the new column family has been created. Called as part of
@ -216,7 +218,7 @@ public interface MasterObserver extends Coprocessor {
*/
void postAddColumnHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor column) throws IOException;
TableName tableName, HColumnDescriptor column) throws IOException;
/**
* Called prior to modifying a column family's attributes. Called as part of
@ -226,7 +228,7 @@ public interface MasterObserver extends Coprocessor {
* @param descriptor the HColumnDescriptor
*/
void preModifyColumn(final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte [] tableName, HColumnDescriptor descriptor) throws IOException;
TableName tableName, HColumnDescriptor descriptor) throws IOException;
/**
* Called after the column family has been updated. Called as part of modify
@ -236,7 +238,7 @@ public interface MasterObserver extends Coprocessor {
* @param descriptor the HColumnDescriptor
*/
void postModifyColumn(final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor descriptor) throws IOException;
TableName tableName, HColumnDescriptor descriptor) throws IOException;
/**
* Called prior to modifying a column family's attributes. Called as part of
@ -247,7 +249,7 @@ public interface MasterObserver extends Coprocessor {
*/
void preModifyColumnHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor descriptor) throws IOException;
TableName tableName, HColumnDescriptor descriptor) throws IOException;
/**
* Called after the column family has been updated. Called as part of modify
@ -258,7 +260,7 @@ public interface MasterObserver extends Coprocessor {
*/
void postModifyColumnHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
byte[] tableName, HColumnDescriptor descriptor) throws IOException;
TableName tableName, HColumnDescriptor descriptor) throws IOException;
/**
@ -269,7 +271,7 @@ public interface MasterObserver extends Coprocessor {
* @param c the column
*/
void preDeleteColumn(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte [] tableName, final byte[] c) throws IOException;
final TableName tableName, final byte[] c) throws IOException;
/**
* Called after the column family has been deleted. Called as part of delete
@ -279,7 +281,7 @@ public interface MasterObserver extends Coprocessor {
* @param c the column
*/
void postDeleteColumn(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte [] tableName, final byte[] c) throws IOException;
final TableName tableName, final byte[] c) throws IOException;
/**
* Called prior to deleting the entire column family. Called as part of
@ -290,7 +292,7 @@ public interface MasterObserver extends Coprocessor {
*/
void preDeleteColumnHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName, final byte[] c) throws IOException;
final TableName tableName, final byte[] c) throws IOException;
/**
* Called after the column family has been deleted. Called as part of
@ -301,7 +303,7 @@ public interface MasterObserver extends Coprocessor {
*/
void postDeleteColumnHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName, final byte[] c) throws IOException;
final TableName tableName, final byte[] c) throws IOException;
/**
* Called prior to enabling a table. Called as part of enable table RPC call.
@ -310,7 +312,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void preEnableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called after the enableTable operation has been requested. Called as part
@ -319,7 +321,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void postEnableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called prior to enabling a table. Called as part of enable table handler
@ -330,7 +332,7 @@ public interface MasterObserver extends Coprocessor {
*/
void preEnableTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called after the enableTable operation has been requested. Called as part
@ -340,7 +342,7 @@ public interface MasterObserver extends Coprocessor {
*/
void postEnableTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called prior to disabling a table. Called as part of disable table RPC
@ -350,7 +352,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void preDisableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called after the disableTable operation has been requested. Called as part
@ -359,7 +361,7 @@ public interface MasterObserver extends Coprocessor {
* @param tableName the name of the table
*/
void postDisableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called prior to disabling a table. Called as part of disable table handler
@ -370,7 +372,7 @@ public interface MasterObserver extends Coprocessor {
*/
void preDisableTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called after the disableTable operation has been requested. Called as part
@ -380,7 +382,7 @@ public interface MasterObserver extends Coprocessor {
*/
void postDisableTableHandler(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final byte[] tableName) throws IOException;
final TableName tableName) throws IOException;
/**
* Called prior to moving a given region from one region server to another.
@ -619,7 +621,8 @@ public interface MasterObserver extends Coprocessor {
* @throws IOException
*/
void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<String> tableNamesList, List<HTableDescriptor> descriptors) throws IOException;
List<TableName> tableNamesList,
List<HTableDescriptor> descriptors) throws IOException;
/**
* Called after a getTableDescriptors request has been processed.
@ -629,4 +632,58 @@ public interface MasterObserver extends Coprocessor {
*/
void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors) throws IOException;
/**
* Called before a new namespace is created by
* {@link org.apache.hadoop.hbase.master.HMaster}.
* It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
* @param ctx the environment to interact with the framework and master
* @param ns the NamespaceDescriptor for the namespace
* @throws IOException
*/
void preCreateNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException;
/**
* Called after the createNamespace operation has been requested.
* @param ctx the environment to interact with the framework and master
* @param ns the NamespaceDescriptor for the namespace
* @throws IOException
*/
void postCreateNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException;
/**
* Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a
* namespace.
* It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
* @param ctx the environment to interact with the framework and master
* @param namespace the name of the namespace
*/
void preDeleteNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String namespace) throws IOException;
/**
* Called after the deleteNamespace operation has been requested.
* @param ctx the environment to interact with the framework and master
* @param namespace the name of the namespace
*/
void postDeleteNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String namespace) throws IOException;
/**
* Called prior to modifying a namespace's properties.
* It can't bypass the default action, e.g., ctx.bypass() won't have any effect.
* @param ctx the environment to interact with the framework and master
* @param ns the NamespaceDescriptor
*/
void preModifyNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException;
/**
* Called after the modifyNamespace operation has been requested.
* @param ctx the environment to interact with the framework and master
* @param ns the NamespaceDescriptor
*/
void postModifyNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException;
}
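A minimal observer sketch against the new namespace hooks, matching the preDeleteNamespace signature above (the guarded namespace name is made up):
public class ProtectedNamespaceObserver extends BaseMasterObserver {
  @Override
  public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String namespace) throws IOException {
    // Refuse deletion of a namespace we care about; throwing aborts the operation.
    if ("prod".equals(namespace)) {
      throw new IOException("namespace 'prod' is protected");
    }
  }
}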

View File

@ -262,7 +262,7 @@ policy implementations, perhaps) ahead of observers.
"TestClassloading.jar");
// create a table that references the jar
HTableDescriptor htd = new HTableDescriptor(getClass().getName());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getClass().getName()));
htd.addFamily(new HColumnDescriptor("test"));
htd.setValue("Coprocessor$1",
path.toString() +

View File

@ -28,13 +28,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Pair;
/**
* HFileLink describes a link to an hfile.
@ -66,12 +67,15 @@ public class HFileLink extends FileLink {
* and the bulk loaded (_SeqId_[0-9]+_) hfiles.
*/
public static final String LINK_NAME_REGEX =
String.format("%s=%s-%s", HTableDescriptor.VALID_USER_TABLE_REGEX,
String.format("(?:(?:%s=)?)%s=%s-%s",
TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX,
HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX);
/** Define the HFile Link name parser in the form of: table=region-hfile */
private static final Pattern LINK_NAME_PATTERN =
Pattern.compile(String.format("^(%s)=(%s)-(%s)$", HTableDescriptor.VALID_USER_TABLE_REGEX,
//made package private for testing
static final Pattern LINK_NAME_PATTERN =
Pattern.compile(String.format("^(?:(%s)(?:\\=))?(%s)=(%s)-(%s)$",
TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX,
HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX));
/**
@ -79,7 +83,8 @@ public class HFileLink extends FileLink {
* that can be found in /hbase/table/region/family/
*/
private static final Pattern REF_OR_HFILE_LINK_PATTERN =
Pattern.compile(String.format("^(%s)=(%s)-(.+)$", HTableDescriptor.VALID_USER_TABLE_REGEX,
Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$",
TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX,
HRegionInfo.ENCODED_REGION_NAME_REGEX));
private final Path archivePath;
@ -138,8 +143,7 @@ public class HFileLink extends FileLink {
public static boolean isHFileLink(String fileName) {
Matcher m = LINK_NAME_PATTERN.matcher(fileName);
if (!m.matches()) return false;
return m.groupCount() > 2 && m.group(3) != null && m.group(2) != null && m.group(1) != null;
return m.groupCount() > 2 && m.group(4) != null && m.group(3) != null && m.group(2) != null;
}
/**
@ -159,11 +163,13 @@ public class HFileLink extends FileLink {
}
// Convert the HFileLink name into a real table/region/cf/hfile path.
String tableName = m.group(1);
String regionName = m.group(2);
String hfileName = m.group(3);
TableName tableName = TableName.valueOf(m.group(1), m.group(2));
String regionName = m.group(3);
String hfileName = m.group(4);
String familyName = path.getParent().getName();
return new Path(new Path(tableName, regionName), new Path(familyName, hfileName));
Path tableDir = FSUtils.getTableDir(new Path("./"), tableName);
return new Path(tableDir, new Path(regionName, new Path(familyName,
hfileName)));
}
/**
@ -177,7 +183,7 @@ public class HFileLink extends FileLink {
if (!m.matches()) {
throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
}
return(m.group(3));
return(m.group(4));
}
/**
@ -191,7 +197,7 @@ public class HFileLink extends FileLink {
if (!m.matches()) {
throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
}
return(m.group(2));
return(m.group(3));
}
/**
@ -200,12 +206,12 @@ public class HFileLink extends FileLink {
* @param fileName HFileLink file name
* @return the name of the referenced Table
*/
public static String getReferencedTableName(final String fileName) {
public static TableName getReferencedTableName(final String fileName) {
Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
if (!m.matches()) {
throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
}
return(m.group(1));
return(TableName.valueOf(m.group(1), m.group(2)));
}
/**
@ -217,7 +223,7 @@ public class HFileLink extends FileLink {
*/
public static String createHFileLinkName(final HRegionInfo hfileRegionInfo,
final String hfileName) {
return createHFileLinkName(hfileRegionInfo.getTableNameAsString(),
return createHFileLinkName(hfileRegionInfo.getTableName(),
hfileRegionInfo.getEncodedName(), hfileName);
}
@ -229,9 +235,12 @@ public class HFileLink extends FileLink {
* @param hfileName - Linked HFile name
* @return file name of the HFile Link
*/
public static String createHFileLinkName(final String tableName,
public static String createHFileLinkName(final TableName tableName,
final String regionName, final String hfileName) {
return String.format("%s=%s-%s", tableName, regionName, hfileName);
String s = String.format("%s=%s-%s",
tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='),
regionName, hfileName);
return s;
}
/**
@ -251,7 +260,7 @@ public class HFileLink extends FileLink {
public static boolean create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final HRegionInfo hfileRegionInfo,
final String hfileName) throws IOException {
String linkedTable = hfileRegionInfo.getTableNameAsString();
TableName linkedTable = hfileRegionInfo.getTableName();
String linkedRegion = hfileRegionInfo.getEncodedName();
return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName);
}
@ -272,11 +281,12 @@ public class HFileLink extends FileLink {
* @throws IOException on file or parent directory creation failure
*/
public static boolean create(final Configuration conf, final FileSystem fs,
final Path dstFamilyPath, final String linkedTable, final String linkedRegion,
final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion,
final String hfileName) throws IOException {
String familyName = dstFamilyPath.getName();
String regionName = dstFamilyPath.getParent().getName();
String tableName = dstFamilyPath.getParent().getParent().getName();
String tableName = FSUtils.getTableName(dstFamilyPath.getParent().getParent())
.getNameAsString();
String name = createHFileLinkName(linkedTable, linkedRegion, hfileName);
String refName = createBackReferenceName(tableName, regionName);
@ -323,14 +333,18 @@ public class HFileLink extends FileLink {
if (!m.matches()) {
throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!");
}
return create(conf, fs, dstFamilyPath, m.group(1), m.group(2), m.group(3));
return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)),
m.group(3), m.group(4));
}
/**
* Create the back reference name
*/
private static String createBackReferenceName(final String tableName, final String regionName) {
return regionName + "." + tableName;
//package-private for testing
static String createBackReferenceName(final String tableNameStr,
final String regionName) {
return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '=');
}
/**
@ -342,20 +356,31 @@ public class HFileLink extends FileLink {
* @throws IOException on unexpected error.
*/
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
int separatorIndex = linkRefPath.getName().indexOf('.');
String linkRegionName = linkRefPath.getName().substring(0, separatorIndex);
String linkTableName = linkRefPath.getName().substring(separatorIndex + 1);
Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());
TableName linkTableName = p.getFirst();
String linkRegionName = p.getSecond();
String hfileName = getBackReferenceFileName(linkRefPath.getParent());
Path familyPath = linkRefPath.getParent().getParent();
Path regionPath = familyPath.getParent();
Path tablePath = regionPath.getParent();
String linkName = createHFileLinkName(tablePath.getName(), regionPath.getName(), hfileName);
Path linkTableDir = FSUtils.getTablePath(rootDir, linkTableName);
String linkName = createHFileLinkName(FSUtils.getTableName(tablePath),
regionPath.getName(), hfileName);
Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName);
Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
static Pair<TableName, String> parseBackReferenceName(String name) {
int separatorIndex = name.indexOf('.');
String linkRegionName = name.substring(0, separatorIndex);
String tableSubstr = name.substring(separatorIndex + 1)
.replace('=', TableName.NAMESPACE_DELIM);
TableName linkTableName = TableName.valueOf(tableSubstr);
return new Pair<TableName, String>(linkTableName, linkRegionName);
}
/**
* Get the full path of the HFile referenced by the back reference
*
@ -368,4 +393,5 @@ public class HFileLink extends FileLink {
throws IOException {
return getHFileFromBackReference(FSUtils.getRootDir(conf), linkRefPath);
}
}
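To make the new naming concrete, a worked example of the namespace-aware link and back-reference formats, following createHFileLinkName and parseBackReferenceName above (the encoded region name and hfile name are invented):
// Link name: namespace and qualifier joined by '=', then region and hfile.
TableName tn = TableName.valueOf("ns1", "mytable");
String link = HFileLink.createHFileLinkName(tn,
    "0123456789abcdef0123456789abcdef", "a1b2c3d4");
// -> "ns1=mytable=0123456789abcdef0123456789abcdef-a1b2c3d4"

// Back reference: region first, a '.' separator, then the '='-escaped table name.
Pair<TableName, String> back =
    HFileLink.parseBackReferenceName("0123456789abcdef0123456789abcdef.ns1=mytable");
// back.getFirst().getNameAsString() -> "ns1:mytable"
// back.getSecond()                  -> "0123456789abcdef0123456789abcdef"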

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
@ -140,7 +141,7 @@ public class HFilePrettyPrinter {
byte[] rn = Bytes.toBytes(regionName);
byte[][] hri = HRegionInfo.parseRegionName(rn);
Path rootDir = FSUtils.getRootDir(conf);
Path tableDir = new Path(rootDir, Bytes.toString(hri[0]));
Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf(hri[0]));
String enc = HRegionInfo.encodeRegionName(rn);
Path regionDir = new Path(tableDir, enc);
if (verbose)

View File

@ -66,6 +66,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -2138,9 +2139,9 @@ public class RpcServer implements RpcServerInterface {
params[1] instanceof Operation) {
// if the slow process is a query, we want to log its table as well
// as its own fingerprint
byte [] tableName =
HRegionInfo.parseRegionName((byte[]) params[0])[0];
responseInfo.put("table", Bytes.toStringBinary(tableName));
TableName tableName = TableName.valueOf(
HRegionInfo.parseRegionName((byte[]) params[0])[0]);
responseInfo.put("table", tableName.getNameAsString());
// annotate the response map with operation details
responseInfo.putAll(((Operation) params[1]).toMap());
// report to the log file

View File

@ -135,7 +135,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
String regionLocation = table.getRegionLocation(startKeys[startPos]).
getHostname();
splits[i] = new TableSplit(this.table.getTableName(),
splits[i] = new TableSplit(this.table.getName(),
startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]:
HConstants.EMPTY_START_ROW, regionLocation);
LOG.info("split: " + i + "->" + splits[i]);

View File

@ -22,6 +22,7 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.InputSplit;
@ -31,14 +32,14 @@ import org.apache.hadoop.mapred.InputSplit;
*/
@Deprecated
public class TableSplit implements InputSplit, Comparable<TableSplit> {
private byte [] m_tableName;
private TableName m_tableName;
private byte [] m_startRow;
private byte [] m_endRow;
private String m_regionLocation;
/** default constructor */
public TableSplit() {
this(HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
this((TableName)null, HConstants.EMPTY_BYTE_ARRAY,
HConstants.EMPTY_BYTE_ARRAY, "");
}
@ -49,7 +50,7 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
* @param endRow
* @param location
*/
public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow,
public TableSplit(TableName tableName, byte [] startRow, byte [] endRow,
final String location) {
this.m_tableName = tableName;
this.m_startRow = startRow;
@ -57,11 +58,21 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
this.m_regionLocation = location;
}
public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow,
final String location) {
this(TableName.valueOf(tableName), startRow, endRow, location);
}
/** @return table name */
public byte [] getTableName() {
public TableName getTable() {
return this.m_tableName;
}
/** @return table name */
public byte [] getTableName() {
return this.m_tableName.getName();
}
/** @return starting row key */
public byte [] getStartRow() {
return this.m_startRow;
@ -87,14 +98,14 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
}
public void readFields(DataInput in) throws IOException {
this.m_tableName = Bytes.readByteArray(in);
this.m_tableName = TableName.valueOf(Bytes.readByteArray(in));
this.m_startRow = Bytes.readByteArray(in);
this.m_endRow = Bytes.readByteArray(in);
this.m_regionLocation = Bytes.toString(Bytes.readByteArray(in));
}
public void write(DataOutput out) throws IOException {
Bytes.writeByteArray(out, this.m_tableName);
Bytes.writeByteArray(out, this.m_tableName.getName());
Bytes.writeByteArray(out, this.m_startRow);
Bytes.writeByteArray(out, this.m_endRow);
Bytes.writeByteArray(out, Bytes.toBytes(this.m_regionLocation));
@ -117,7 +128,7 @@ public class TableSplit implements InputSplit, Comparable<TableSplit> {
return false;
}
TableSplit other = (TableSplit)o;
return Bytes.equals(m_tableName, other.m_tableName) &&
return m_tableName.equals(other.m_tableName) &&
Bytes.equals(m_startRow, other.m_startRow) &&
Bytes.equals(m_endRow, other.m_endRow) &&
m_regionLocation.equals(other.m_regionLocation);
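
Aside: the Writable wire format is unchanged (the table name still travels as a byte array), so previously serialized splits keep deserializing. A hedged round-trip sketch, assuming the mapred TableSplit above plus TableName and Bytes on the classpath; table, row keys, and hostname are hypothetical:

public class TableSplitRoundTrip {
  public static void main(String[] args) throws java.io.IOException {
    TableSplit original = new TableSplit(TableName.valueOf("myns:mytable"),
        Bytes.toBytes("row-000"), Bytes.toBytes("row-999"), "rs1.example.com");
    java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
    original.write(new java.io.DataOutputStream(buf));   // table name written as byte[]
    TableSplit copy = new TableSplit();
    copy.readFields(new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(buf.toByteArray())));
    // The TableName survives the trip through the unchanged byte[] format.
    assert copy.getTable().equals(original.getTable());
  }
}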

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@ -358,7 +359,7 @@ public class ImportTsv extends Configured implements Tool {
private static void createTable(HBaseAdmin admin, String tableName, String[] columns)
throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName.getBytes());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
Set<String> cfSet = new HashSet<String>();
for (String aColumn : columns) {
if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn)) continue;

View File

@ -51,6 +51,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@ -61,7 +62,6 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionServerCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.coprocessor.SecureBulkLoadClient;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
@ -200,7 +200,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
{
final HConnection conn = table.getConnection();
if (!conn.isTableAvailable(table.getTableName())) {
if (!conn.isTableAvailable(table.getName())) {
throw new TableNotFoundException("Table " +
Bytes.toStringBinary(table.getTableName()) +
"is not currently available.");
@ -261,7 +261,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
if(User.isSecurityEnabled()) {
userToken = fs.getDelegationToken("renewer");
}
bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getTableName());
bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName());
}
// Assumes that region splits can happen while this occurs.
@ -339,7 +339,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
public List<LoadQueueItem> call() throws Exception {
List<LoadQueueItem> toRetry = tryAtomicRegionLoad(conn, table.getTableName(), first, lqis);
List<LoadQueueItem> toRetry =
tryAtomicRegionLoad(conn, table.getName(), first, lqis);
return toRetry;
}
};
@ -420,8 +421,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
}
// unique file name for the table
String getUniqueName(byte[] tableName) {
String name = Bytes.toStringBinary(tableName) + "," + regionCount.incrementAndGet();
String getUniqueName(TableName tableName) {
String name = tableName + "," + regionCount.incrementAndGet();
return name;
}
@ -437,7 +438,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
LOG.info("HFile at " + hfilePath + " no longer fits inside a single " +
"region. Splitting...");
String uniqueName = getUniqueName(table.getTableName());
String uniqueName = getUniqueName(table.getName());
HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family);
Path botOut = new Path(tmpDir, uniqueName + ".bottom");
Path topOut = new Path(tmpDir, uniqueName + ".top");
@ -530,7 +531,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* failure
*/
protected List<LoadQueueItem> tryAtomicRegionLoad(final HConnection conn,
byte[] tableName, final byte[] first, Collection<LoadQueueItem> lqis) throws IOException {
final TableName tableName,
final byte[] first, Collection<LoadQueueItem> lqis) throws IOException {
final List<Pair<byte[], String>> famPaths =
new ArrayList<Pair<byte[], String>>(lqis.size());
@ -595,7 +597,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
if (!success) {
LOG.warn("Attempt to bulk load region containing "
+ Bytes.toStringBinary(first) + " into table "
+ Bytes.toStringBinary(tableName) + " with files " + lqis
+ tableName + " with files " + lqis
+ " failed. This is recoverable and they will be retried.");
toRetry.addAll(lqis); // return lqi's to retry
}
@ -678,7 +680,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
return !HFile.isReservedFileInfoKey(key);
}
private boolean doesTableExist(String tableName) throws Exception {
private boolean doesTableExist(TableName tableName) throws Exception {
return hbAdmin.tableExists(tableName);
}
@ -716,7 +718,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* If the table is created for the first time, then "completebulkload" reads the files twice.
* More modifications are necessary if we want to avoid doing it.
*/
private void createTable(String tableName, String dirPath) throws Exception {
private void createTable(TableName tableName, String dirPath) throws Exception {
Path hfofDir = new Path(dirPath);
FileSystem fs = hfofDir.getFileSystem(getConf());
@ -797,7 +799,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
}
String dirPath = args[0];
String tableName = args[1];
TableName tableName = TableName.valueOf(args[1]);
boolean tableExists = this.doesTableExist(tableName);
if (!tableExists) this.createTable(tableName,dirPath);
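
Aside: for context, the string form accepted by TableName.valueOf() is either a plain name (default namespace) or a namespace-qualified "ns:table" pair. The accessors below are the ones this patch uses elsewhere; the names themselves are hypothetical:

TableName plain  = TableName.valueOf("mytable");        // default namespace
TableName scoped = TableName.valueOf("myns:mytable");   // explicit namespace
scoped.getNamespaceAsString();   // "myns"
scoped.getNameAsString();        // "myns:mytable"
scoped.getName();                // the same, as byte[]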

View File

@ -152,7 +152,8 @@ public abstract class MultiTableInputFormatBase extends
stopRow) <= 0) && keys.getSecond()[i].length > 0 ? keys
.getSecond()[i] : stopRow;
InputSplit split =
new TableSplit(tableName, scan, splitStart, splitStop, regionLocation);
new TableSplit(table.getName(),
scan, splitStart, splitStop, regionLocation);
splits.add(split);
if (LOG.isDebugEnabled())
LOG.debug("getSplits: split -> " + (count++) + " -> " + split);

View File

@ -166,7 +166,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
throw new IOException("Expecting at least one region.");
}
List<InputSplit> splits = new ArrayList<InputSplit>(1);
InputSplit split = new TableSplit(table.getTableName(),
InputSplit split = new TableSplit(table.getName(),
HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0]);
splits.add(split);
@ -206,7 +206,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) &&
keys.getSecond()[i].length > 0 ?
keys.getSecond()[i] : stopRow;
InputSplit split = new TableSplit(table.getTableName(),
InputSplit split = new TableSplit(table.getName(),
splitStart, splitStop, regionLocation);
splits.add(split);
if (LOG.isDebugEnabled()) {

View File

@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
@ -76,7 +77,7 @@ implements Writable, Comparable<TableSplit> {
}
private static final Version VERSION = Version.INITIAL;
private byte [] tableName;
private TableName tableName;
private byte [] startRow;
private byte [] endRow;
private String regionLocation;
@ -84,7 +85,7 @@ implements Writable, Comparable<TableSplit> {
/** Default constructor. */
public TableSplit() {
this(HConstants.EMPTY_BYTE_ARRAY, null, HConstants.EMPTY_BYTE_ARRAY,
this(null, null, HConstants.EMPTY_BYTE_ARRAY,
HConstants.EMPTY_BYTE_ARRAY, "");
}
@ -97,7 +98,7 @@ implements Writable, Comparable<TableSplit> {
* @param endRow The end row of the split.
* @param location The location of the region.
*/
public TableSplit(byte [] tableName, Scan scan, byte [] startRow, byte [] endRow,
public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location) {
this.tableName = tableName;
try {
@ -119,7 +120,7 @@ implements Writable, Comparable<TableSplit> {
* @param endRow The end row of the split.
* @param location The location of the region.
*/
public TableSplit(byte[] tableName, byte[] startRow, byte[] endRow,
public TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location) {
this(tableName, null, startRow, endRow, location);
}
@ -139,7 +140,7 @@ implements Writable, Comparable<TableSplit> {
*
* @return The table name.
*/
public byte [] getTableName() {
public TableName getTableName() {
return tableName;
}
@ -216,8 +217,9 @@ implements Writable, Comparable<TableSplit> {
version = Version.fromCode(len);
len = WritableUtils.readVInt(in);
}
tableName = new byte[len];
in.readFully(tableName);
byte[] tableNameBytes = new byte[len];
in.readFully(tableNameBytes);
tableName = TableName.valueOf(tableNameBytes);
startRow = Bytes.readByteArray(in);
endRow = Bytes.readByteArray(in);
regionLocation = Bytes.toString(Bytes.readByteArray(in));
@ -235,7 +237,7 @@ implements Writable, Comparable<TableSplit> {
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, VERSION.code);
Bytes.writeByteArray(out, tableName);
Bytes.writeByteArray(out, tableName.getName());
Bytes.writeByteArray(out, startRow);
Bytes.writeByteArray(out, endRow);
Bytes.writeByteArray(out, Bytes.toBytes(regionLocation));
@ -266,7 +268,7 @@ implements Writable, Comparable<TableSplit> {
// If the table name of the two splits is the same, then compare start rows;
// otherwise compare based on table names.
int tableNameComparison =
Bytes.compareTo(getTableName(), split.getTableName());
getTableName().compareTo(split.getTableName());
return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo(
getStartRow(), split.getStartRow());
}
@ -276,7 +278,7 @@ implements Writable, Comparable<TableSplit> {
if (o == null || !(o instanceof TableSplit)) {
return false;
}
return Bytes.equals(tableName, ((TableSplit)o).tableName) &&
return tableName.equals(((TableSplit)o).tableName) &&
Bytes.equals(startRow, ((TableSplit)o).startRow) &&
Bytes.equals(endRow, ((TableSplit)o).endRow) &&
regionLocation.equals(((TableSplit)o).regionLocation);
@ -284,7 +286,7 @@ implements Writable, Comparable<TableSplit> {
@Override
public int hashCode() {
int result = tableName != null ? Arrays.hashCode(tableName) : 0;
int result = tableName != null ? tableName.hashCode() : 0;
result = 31 * result + (scan != null ? scan.hashCode() : 0);
result = 31 * result + (startRow != null ? Arrays.hashCode(startRow) : 0);
result = 31 * result + (endRow != null ? Arrays.hashCode(endRow) : 0);

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
@ -79,7 +80,7 @@ public class WALPlayer extends Configured implements Tool {
throws IOException {
try {
// skip all other tables
if (Bytes.equals(table, key.getTablename())) {
if (Bytes.equals(table, key.getTablename().getName())) {
for (KeyValue kv : value.getKeyValues()) {
if (WALEdit.isMetaEditFamily(kv.getFamily())) continue;
context.write(new ImmutableBytesWritable(kv.getRow()), kv);
@ -108,7 +109,8 @@ public class WALPlayer extends Configured implements Tool {
*/
static class HLogMapper
extends Mapper<HLogKey, WALEdit, ImmutableBytesWritable, Mutation> {
private Map<byte[], byte[]> tables = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
private Map<TableName, TableName> tables =
new TreeMap<TableName, TableName>();
@Override
public void map(HLogKey key, WALEdit value,
@ -116,10 +118,10 @@ public class WALPlayer extends Configured implements Tool {
throws IOException {
try {
if (tables.isEmpty() || tables.containsKey(key.getTablename())) {
byte[] targetTable = tables.isEmpty() ?
TableName targetTable = tables.isEmpty() ?
key.getTablename() :
tables.get(key.getTablename());
ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable);
ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName());
Put put = null;
Delete del = null;
KeyValue lastKV = null;
@ -168,7 +170,8 @@ public class WALPlayer extends Configured implements Tool {
}
int i = 0;
for (String table : tablesToUse) {
tables.put(Bytes.toBytes(table), Bytes.toBytes(tableMap[i++]));
tables.put(TableName.valueOf(table),
TableName.valueOf(tableMap[i++]));
}
}
}
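
Aside: a short sketch (hypothetical table names; assumes java.util.Map/TreeMap and org.apache.hadoop.hbase.TableName imports) of the source-to-target map built here. An empty map means "replay every edit to its original table" in the mapper above:

String[] tablesToUse = { "srcns:events" };   // tables whose edits to replay
String[] tableMap    = { "dstns:events" };   // parallel array of target tables
Map<TableName, TableName> tables = new TreeMap<TableName, TableName>();
for (int i = 0; i < tablesToUse.length; i++) {
  tables.put(TableName.valueOf(tablesToUse[i]), TableName.valueOf(tableMap[i]));
}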

View File

@ -39,11 +39,15 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionTransition;
@ -53,7 +57,6 @@ import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
@ -89,7 +92,6 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
@ -355,7 +357,7 @@ public class AssignmentManager extends ZooKeeperListener {
* @return Pair indicating the status of the alter command
* @throws IOException
*/
public Pair<Integer, Integer> getReopenStatus(byte[] tableName)
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
throws IOException {
List <HRegionInfo> hris =
MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
@ -450,7 +452,8 @@ public class AssignmentManager extends ZooKeeperListener {
// it's a clean cluster startup, else it's a failover.
Map<HRegionInfo, ServerName> regions = regionStates.getRegionAssignments();
for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
if (!e.getKey().isMetaTable() && e.getValue() != null) {
if (!HTableDescriptor.isSystemTable(e.getKey().getTableName())
&& e.getValue() != null) {
LOG.debug("Found " + e + " out on cluster");
failover = true;
break;
@ -1259,7 +1262,7 @@ public class AssignmentManager extends ZooKeeperListener {
LOG.info("The master has opened "
+ regionNameStr + " that was online on " + serverName);
boolean disabled = getZKTable().isDisablingOrDisabledTable(
regionInfo.getTableNameAsString());
regionInfo.getTableName());
if (!serverManager.isServerOnline(serverName) && !disabled) {
LOG.info("Opened " + regionNameStr
+ "but the region server is offline, reassign the region");
@ -1863,7 +1866,7 @@ public class AssignmentManager extends ZooKeeperListener {
// When all the regions are added directly into .META. and assignRegion is then
// called, we need to make the table ENABLED. In such a case the table will not
// yet be in ENABLING or ENABLED state.
String tableName = region.getTableNameAsString();
TableName tableName = region.getTableName();
if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
LOG.debug("Setting table " + tableName + " to ENABLED state.");
setEnabledTable(tableName);
@ -2043,7 +2046,7 @@ public class AssignmentManager extends ZooKeeperListener {
}
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
String tableName = region.getTableNameAsString();
TableName tableName = region.getTableName();
boolean disabled = this.zkTable.isDisabledTable(tableName);
if (disabled || this.zkTable.isDisablingTable(tableName)) {
LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;") +
@ -2520,10 +2523,10 @@ public class AssignmentManager extends ZooKeeperListener {
// Skip assignment for regions of tables in DISABLING state because during clean cluster startup
// no RS is alive and the regions map also doesn't have any information about the regions.
// See HBASE-6281.
Set<String> disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));
// Scan META for all user regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions = null;
Map<HRegionInfo, ServerName> allRegions;
if (this.shouldAssignRegionsWithFavoredNodes) {
allRegions = FavoredNodeAssignmentHelper.fullScan(
catalogTracker, disabledOrDisablingOrEnabling, true, (FavoredNodeLoadBalancer)balancer);
@ -2531,7 +2534,18 @@ public class AssignmentManager extends ZooKeeperListener {
allRegions = MetaReader.fullScan(
catalogTracker, disabledOrDisablingOrEnabling, true);
}
if (allRegions == null || allRegions.isEmpty()) return;
if (allRegions == null) return;
// Remove system tables because they would have been assigned earlier.
for(Iterator<HRegionInfo> iter = allRegions.keySet().iterator();
iter.hasNext();) {
if (HTableDescriptor.isSystemTable(iter.next().getTableName())) {
iter.remove();
}
}
if (allRegions.isEmpty()) return;
// Determine what type of assignment to do on startup
boolean retainAssignment = server.getConfiguration().
@ -2545,7 +2559,7 @@ public class AssignmentManager extends ZooKeeperListener {
}
for (HRegionInfo hri : allRegions.keySet()) {
String tableName = hri.getTableNameAsString();
TableName tableName = hri.getTableName();
if (!zkTable.isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
@ -2586,10 +2600,10 @@ public class AssignmentManager extends ZooKeeperListener {
* @throws IOException
*/
Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
Set<String> enablingTables = ZKTable.getEnablingTables(watcher);
Set<String> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
disabledOrEnablingTables.addAll(enablingTables);
Set<String> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables);
// Region assignment from META
@ -2607,7 +2621,7 @@ public class AssignmentManager extends ZooKeeperListener {
ServerName regionLocation = region.getSecond();
if (regionInfo == null) continue;
regionStates.createRegionState(regionInfo);
String tableName = regionInfo.getTableNameAsString();
TableName tableName = regionInfo.getTableName();
if (regionLocation == null) {
// regionLocation could be null if createTable didn't finish properly,
// e.g. because the HMaster restarted while createTable was in progress.
@ -2678,14 +2692,14 @@ public class AssignmentManager extends ZooKeeperListener {
*/
private void recoverTableInDisablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<String> disablingTables = ZKTable.getDisablingTables(watcher);
Set<TableName> disablingTables = ZKTable.getDisablingTables(watcher);
if (disablingTables.size() != 0) {
for (String tableName : disablingTables) {
for (TableName tableName : disablingTables) {
// Recover by calling DisableTableHandler
LOG.info("The table " + tableName
+ " is in DISABLING state. Hence recovering by moving the table"
+ " to DISABLED state.");
new DisableTableHandler(this.server, tableName.getBytes(), catalogTracker,
new DisableTableHandler(this.server, tableName, catalogTracker,
this, tableLockManager, true).prepare().process();
}
}
@ -2701,16 +2715,16 @@ public class AssignmentManager extends ZooKeeperListener {
*/
private void recoverTableInEnablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<String> enablingTables = ZKTable.getEnablingTables(watcher);
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
if (enablingTables.size() != 0) {
for (String tableName : enablingTables) {
for (TableName tableName : enablingTables) {
// Recover by calling EnableTableHandler
LOG.info("The table " + tableName
+ " is in ENABLING state. Hence recovering by moving the table"
+ " to ENABLED state.");
// enableTable in sync way during master startup,
// no need to invoke coprocessor
new EnableTableHandler(this.server, tableName.getBytes(),
new EnableTableHandler(this.server, tableName,
catalogTracker, this, tableLockManager, true).prepare().process();
}
}
@ -3109,8 +3123,7 @@ public class AssignmentManager extends ZooKeeperListener {
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting node " + hri, ke);
}
if (zkTable.isDisablingOrDisabledTable(hri.getTableNameAsString())) {
if (zkTable.isDisablingOrDisabledTable(hri.getTableName())) {
it.remove();
regionStates.regionOffline(hri);
continue;
@ -3143,7 +3156,7 @@ public class AssignmentManager extends ZooKeeperListener {
// that case. This is not racing with the region server itself since RS
// report is done after the split transaction completed.
if (this.zkTable.isDisablingOrDisabledTable(
parent.getTableNameAsString())) {
parent.getTableName())) {
unassign(a);
unassign(b);
}
@ -3166,7 +3179,7 @@ public class AssignmentManager extends ZooKeeperListener {
// the master to disable, we need to make sure we close those regions in
// that case. This is not racing with the region server itself since RS
// report is done after the regions merge transaction completed.
if (this.zkTable.isDisablingOrDisabledTable(merged.getTableNameAsString())) {
if (this.zkTable.isDisablingOrDisabledTable(merged.getTableName())) {
unassign(merged);
}
}
@ -3200,7 +3213,7 @@ public class AssignmentManager extends ZooKeeperListener {
zkEventWorkers.shutdownNow();
}
protected void setEnabledTable(String tableName) {
protected void setEnabledTable(TableName tableName) {
try {
this.zkTable.setEnabledTable(tableName);
} catch (KeeperException e) {

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Triple;
@ -128,8 +130,8 @@ public class CatalogJanitor extends Chore {
* @throws IOException
*/
Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
final byte[] tableName) throws IOException {
final boolean isTableSpecified = (tableName != null && tableName.length != 0);
final TableName tableName) throws IOException {
final boolean isTableSpecified = (tableName != null);
// TODO: Only works with single .META. region currently. Fix.
final AtomicInteger count = new AtomicInteger(0);
// Keep Map of found split parents. There are candidates for cleanup.
@ -147,7 +149,7 @@ public class CatalogJanitor extends Chore {
HRegionInfo info = HRegionInfo.getHRegionInfo(r);
if (info == null) return true; // Keep scanning
if (isTableSpecified
&& Bytes.compareTo(info.getTableName(), tableName) > 0) {
&& info.getTableName().compareTo(tableName) > 0) {
// Another table, stop scanning
return false;
}
@ -182,10 +184,9 @@ public class CatalogJanitor extends Chore {
final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
Path tabledir = HTableDescriptor.getTableDir(rootdir,
Path tabledir = FSUtils.getTableDir(rootdir,
mergedRegion.getTableName());
HTableDescriptor htd = getTableDescriptor(mergedRegion
.getTableNameAsString());
HTableDescriptor htd = getTableDescriptor(mergedRegion.getTableName());
HRegionFileSystem regionFs = null;
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
@ -289,7 +290,7 @@ public class CatalogJanitor extends Chore {
if (left == null) return -1;
if (right == null) return 1;
// Same table name.
int result = Bytes.compareTo(left.getTableName(),
int result = left.getTableName().compareTo(
right.getTableName());
if (result != 0) return result;
// Compare start keys.
@ -374,7 +375,7 @@ public class CatalogJanitor extends Chore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
Path tabledir = HTableDescriptor.getTableDir(rootdir, daughter.getTableName());
Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTableName());
HRegionFileSystem regionFs = null;
try {
@ -386,7 +387,7 @@ public class CatalogJanitor extends Chore {
}
boolean references = false;
HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableNameAsString());
HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
if ((references = regionFs.hasReferences(family.getNameAsString()))) {
break;
@ -395,7 +396,7 @@ public class CatalogJanitor extends Chore {
return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
}
private HTableDescriptor getTableDescriptor(final String tableName)
private HTableDescriptor getTableDescriptor(final TableName tableName)
throws FileNotFoundException, IOException {
return this.services.getTableDescriptors().get(tableName);
}
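
Aside: the FSUtils.getTableDir() calls above reflect the namespace-aware on-disk layout. As a rough sketch (the exact "data" subdirectory name is an assumption about this layout, and the paths are hypothetical; assumes org.apache.hadoop.fs.Path), a table now lives under a namespace subdirectory of the root:

Path rootdir = new Path("/hbase");
// Assumed layout: <rootdir>/data/<namespace>/<qualifier>
Path tabledir = new Path(new Path(new Path(rootdir, "data"), "myns"), "mytable");
Path regiondir = new Path(tabledir, "e1f2a3b4c5");   // encoded region name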

View File

@ -28,6 +28,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -40,6 +41,8 @@ import java.util.concurrent.atomic.AtomicReference;
import javax.management.ObjectName;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@ -49,7 +52,11 @@ import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -72,12 +79,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;
@ -109,6 +111,7 @@ import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@ -206,6 +209,7 @@ import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@ -274,6 +278,10 @@ MasterServices, Server {
// Set back to false after we stop rpcServer. Used by tests.
private volatile boolean rpcServerOpen = false;
/** Namespace stuff */
private TableNamespaceManager tableNamespaceManager;
private NamespaceJanitor namespaceJanitorChore;
/**
* This servers address.
*/
@ -750,6 +758,7 @@ MasterServices, Server {
*/
status.setStatus("Initializing Master file system");
this.masterActiveTime = System.currentTimeMillis();
// TODO: Do this using dependency injection (PicoContainer, Guice, or Spring).
this.fileSystemManager = new MasterFileSystem(this, this, metricsMaster, masterRecovery);
@ -847,6 +856,10 @@ MasterServices, Server {
this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs);
}
status.setStatus("Assigning System tables");
// Make sure system tables are assigned before proceeding.
assignSystemTables(status);
enableServerShutdownHandler();
status.setStatus("Submitting log splitting work for previously failed region servers");
@ -876,7 +889,9 @@ MasterServices, Server {
this.clusterStatusChore = getAndStartClusterStatusChore(this);
this.balancerChore = getAndStartBalancerChore(this);
this.catalogJanitorChore = new CatalogJanitor(this, this);
this.namespaceJanitorChore = new NamespaceJanitor(this);
startCatalogJanitorChore();
startNamespaceJanitorChore();
}
status.markComplete("Initialization successful");
@ -907,6 +922,14 @@ MasterServices, Server {
Threads.setDaemonThreadRunning(catalogJanitorChore.getThread());
}
/**
* Useful also for testing purposes, e.g. where we have
* master restart scenarios.
*/
protected void startNamespaceJanitorChore() {
Threads.setDaemonThreadRunning(namespaceJanitorChore.getThread());
}
/**
* Create a {@link ServerManager} instance.
* @param master
@ -980,9 +1003,9 @@ MasterServices, Server {
this.catalogTracker.getMetaLocation());
}
enableCatalogTables(Bytes.toString(HConstants.META_TABLE_NAME));
LOG.info(".META. assigned=" + assigned + ", rit=" + rit + ", location="
+ catalogTracker.getMetaLocation());
enableMeta(TableName.META_TABLE_NAME);
LOG.info(".META. assigned=" + assigned + ", rit=" + rit +
", location=" + catalogTracker.getMetaLocation());
status.setStatus("META assigned.");
}
@ -998,6 +1021,82 @@ MasterServices, Server {
}
}
private void splitLogBeforeAssignment(ServerName currentServer,
Set<HRegionInfo> regions) throws IOException {
if (this.distributedLogReplay) {
this.fileSystemManager.prepareLogReplay(currentServer, regions);
} else {
// In recovered.edits mode: create recovered edits file for region server
this.fileSystemManager.splitLog(currentServer);
}
}
void assignSystemTables(MonitoredTask status)
throws InterruptedException, IOException, KeeperException {
// Skip assignment for regions of tables in DISABLING state because during clean cluster startup
// no RS is alive and the regions map also doesn't have any information about the regions.
// See HBASE-6281.
Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(zooKeeper);
disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(zooKeeper));
// Scan META for all system regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions =
MetaReader.fullScan(catalogTracker, disabledOrDisablingOrEnabling, true);
for(Iterator<HRegionInfo> iter = allRegions.keySet().iterator();
iter.hasNext();) {
if (!HTableDescriptor.isSystemTable(iter.next().getTableName())) {
iter.remove();
}
}
int assigned = 0;
boolean beingExpired = false;
status.setStatus("Assigning System Regions");
for(Map.Entry<HRegionInfo, ServerName> entry: allRegions.entrySet()) {
HRegionInfo regionInfo = entry.getKey();
ServerName currServer = entry.getValue();
assignmentManager.getRegionStates().createRegionState(regionInfo);
boolean rit = this.assignmentManager
.processRegionInTransitionAndBlockUntilAssigned(regionInfo);
boolean regionLocation = false;
if (currServer != null) {
regionLocation = verifyRegionLocation(currServer, regionInfo);
}
if (!rit && !regionLocation) {
beingExpired = expireIfOnline(currServer);
if (beingExpired) {
splitLogBeforeAssignment(currServer, Sets.newHashSet(regionInfo));
}
assignmentManager.assign(regionInfo, true);
// Make sure a region location is set.
this.assignmentManager.waitForAssignment(regionInfo);
assigned++;
if (beingExpired && this.distributedLogReplay) {
// In Replay WAL Mode, we need the new region server online
this.fileSystemManager.splitLog(currServer);
}
} else if (rit && !regionLocation) {
if (!waitVerifiedRegionLocation(regionInfo)) return;
assigned++;
} else {
// Region already assigned. We didn't assign it. Add to in-memory state.
this.assignmentManager.regionOnline(regionInfo, currServer);
}
if (!this.assignmentManager.getZKTable().isEnabledTable(regionInfo.getTableName())) {
this.assignmentManager.setEnabledTable(regionInfo.getTableName());
}
LOG.info("System Regions assigned=" + assigned + ", rit=" + rit +
", location=" + catalogTracker.getMetaLocation());
}
status.setStatus("System Regions assigned.");
initNamespace();
}
private void enableSSHandWaitForMeta() throws IOException, InterruptedException {
enableServerShutdownHandler();
this.catalogTracker.waitForMeta();
@ -1006,9 +1105,31 @@ MasterServices, Server {
this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
}
private void enableCatalogTables(String catalogTableName) {
if (!this.assignmentManager.getZKTable().isEnabledTable(catalogTableName)) {
this.assignmentManager.setEnabledTable(catalogTableName);
private boolean waitVerifiedRegionLocation(HRegionInfo regionInfo) throws IOException {
while (!this.stopped) {
Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker,
regionInfo.getRegionName());
if (verifyRegionLocation(p.getSecond(), p.getFirst())) break;
}
// We got here because we exited the loop above.
return !this.stopped;
}
private boolean verifyRegionLocation(ServerName currServer, HRegionInfo regionInfo) {
try {
return
ProtobufUtil.getRegionInfo(HConnectionManager.getConnection(conf)
.getAdmin(currServer),
regionInfo.getRegionName()) != null;
} catch (IOException e) {
LOG.info("Failed to contact server: "+currServer, e);
}
return false;
}
private void enableMeta(TableName metaTableName) {
if (!this.assignmentManager.getZKTable().isEnabledTable(metaTableName)) {
this.assignmentManager.setEnabledTable(metaTableName);
}
}
@ -1028,6 +1149,12 @@ MasterServices, Server {
return true;
}
void initNamespace() throws IOException {
//create namespace manager
tableNamespaceManager = new TableNamespaceManager(this);
tableNamespaceManager.start();
}
/**
* This function returns the set of region server names recorded under the .META. recovering-region ZK node
* @return Set of meta server names which were recorded in ZK
@ -1205,6 +1332,9 @@ MasterServices, Server {
if (this.clusterStatusPublisherChore != null){
clusterStatusPublisherChore.interrupt();
}
if (this.namespaceJanitorChore != null){
namespaceJanitorChore.interrupt();
}
}
@Override
@ -1386,7 +1516,7 @@ MasterServices, Server {
}
}
Map<String, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
Map<TableName, Map<ServerName, List<HRegionInfo>>> assignmentsByTable =
this.assignmentManager.getRegionStates().getAssignmentsByTable();
List<RegionPlan> plans = new ArrayList<RegionPlan>();
@ -1645,13 +1775,18 @@ MasterServices, Server {
throw new MasterNotRunningException();
}
HRegionInfo [] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
if (getNamespaceDescriptor(namespace) == null) {
throw new ConstraintException("Namespace " + namespace + " does not exist");
}
HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
checkInitialized();
checkCompression(hTableDescriptor);
if (cpHost != null) {
cpHost.preCreateTable(hTableDescriptor, newRegions);
}
this.executorService.submit(new CreateTableHandler(this,
this.fileSystemManager, hTableDescriptor, conf,
newRegions, this).prepare());
@ -1694,7 +1829,7 @@ MasterServices, Server {
HRegionInfo[] hRegionInfos = null;
if (splitKeys == null || splitKeys.length == 0) {
hRegionInfos = new HRegionInfo[]{
new HRegionInfo(hTableDescriptor.getName(), null, null)};
new HRegionInfo(hTableDescriptor.getTableName(), null, null)};
} else {
int numRegions = splitKeys.length + 1;
hRegionInfos = new HRegionInfo[numRegions];
@ -1703,19 +1838,19 @@ MasterServices, Server {
for (int i = 0; i < numRegions; i++) {
endKey = (i == splitKeys.length) ? null : splitKeys[i];
hRegionInfos[i] =
new HRegionInfo(hTableDescriptor.getName(), startKey, endKey);
new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey);
startKey = endKey;
}
}
return hRegionInfos;
}
private static boolean isCatalogTable(final byte [] tableName) {
return Bytes.equals(tableName, HConstants.META_TABLE_NAME);
private static boolean isCatalogTable(final TableName tableName) {
return tableName.equals(TableName.META_TABLE_NAME);
}
@Override
public void deleteTable(final byte[] tableName) throws IOException {
public void deleteTable(final TableName tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preDeleteTable(tableName);
@ -1730,7 +1865,7 @@ MasterServices, Server {
public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
throws ServiceException {
try {
deleteTable(request.getTableName().toByteArray());
deleteTable(ProtobufUtil.toTableName(request.getTableName()));
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
@ -1752,7 +1887,7 @@ MasterServices, Server {
// may overlap with other table operations or the table operation may
// have completed before querying this API. We need to refactor to a
// transaction system in the future to avoid these ambiguities.
byte [] tableName = req.getTableName().toByteArray();
TableName tableName = ProtobufUtil.toTableName(req.getTableName());
try {
Pair<Integer,Integer> pair = this.assignmentManager.getReopenStatus(tableName);
@ -1766,7 +1901,7 @@ MasterServices, Server {
}
@Override
public void addColumn(final byte[] tableName, final HColumnDescriptor column)
public void addColumn(final TableName tableName, final HColumnDescriptor column)
throws IOException {
checkInitialized();
if (cpHost != null) {
@ -1786,7 +1921,7 @@ MasterServices, Server {
public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
throws ServiceException {
try {
addColumn(req.getTableName().toByteArray(),
addColumn(ProtobufUtil.toTableName(req.getTableName()),
HColumnDescriptor.convert(req.getColumnFamilies()));
} catch (IOException ioe) {
throw new ServiceException(ioe);
@ -1795,7 +1930,7 @@ MasterServices, Server {
}
@Override
public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor)
public void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
throws IOException {
checkInitialized();
checkCompression(descriptor);
@ -1815,7 +1950,7 @@ MasterServices, Server {
public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
throws ServiceException {
try {
modifyColumn(req.getTableName().toByteArray(),
modifyColumn(ProtobufUtil.toTableName(req.getTableName()),
HColumnDescriptor.convert(req.getColumnFamilies()));
} catch (IOException ioe) {
throw new ServiceException(ioe);
@ -1824,7 +1959,7 @@ MasterServices, Server {
}
@Override
public void deleteColumn(final byte[] tableName, final byte[] columnName)
public void deleteColumn(final TableName tableName, final byte[] columnName)
throws IOException {
checkInitialized();
if (cpHost != null) {
@ -1842,7 +1977,8 @@ MasterServices, Server {
public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
throws ServiceException {
try {
deleteColumn(req.getTableName().toByteArray(), req.getColumnName().toByteArray());
deleteColumn(ProtobufUtil.toTableName(req.getTableName()),
req.getColumnName().toByteArray());
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
@ -1850,7 +1986,7 @@ MasterServices, Server {
}
@Override
public void enableTable(final byte[] tableName) throws IOException {
public void enableTable(final TableName tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preEnableTable(tableName);
@ -1866,7 +2002,7 @@ MasterServices, Server {
public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
throws ServiceException {
try {
enableTable(request.getTableName().toByteArray());
enableTable(ProtobufUtil.toTableName(request.getTableName()));
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
@ -1874,7 +2010,7 @@ MasterServices, Server {
}
@Override
public void disableTable(final byte[] tableName) throws IOException {
public void disableTable(final TableName tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preDisableTable(tableName);
@ -1890,7 +2026,7 @@ MasterServices, Server {
public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
throws ServiceException {
try {
disableTable(request.getTableName().toByteArray());
disableTable(ProtobufUtil.toTableName(request.getTableName()));
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
@ -1904,7 +2040,7 @@ MasterServices, Server {
* may be null.
*/
Pair<HRegionInfo, ServerName> getTableRegionForRow(
final byte [] tableName, final byte [] rowKey)
final TableName tableName, final byte [] rowKey)
throws IOException {
final AtomicReference<Pair<HRegionInfo, ServerName>> result =
new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
@ -1920,7 +2056,7 @@ MasterServices, Server {
if (pair == null) {
return false;
}
if (!Bytes.equals(pair.getFirst().getTableName(), tableName)) {
if (!pair.getFirst().getTableName().equals(tableName)) {
return false;
}
result.set(pair);
@ -1933,7 +2069,7 @@ MasterServices, Server {
}
@Override
public void modifyTable(final byte[] tableName, final HTableDescriptor descriptor)
public void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
throws IOException {
checkInitialized();
checkCompression(descriptor);
@ -1950,7 +2086,7 @@ MasterServices, Server {
public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
throws ServiceException {
try {
modifyTable(req.getTableName().toByteArray(),
modifyTable(ProtobufUtil.toTableName(req.getTableName()),
HTableDescriptor.convert(req.getTableSchema()));
} catch (IOException ioe) {
throw new ServiceException(ioe);
@ -1959,17 +2095,16 @@ MasterServices, Server {
}
@Override
public void checkTableModifiable(final byte [] tableName)
public void checkTableModifiable(final TableName tableName)
throws IOException, TableNotFoundException, TableNotDisabledException {
String tableNameStr = Bytes.toString(tableName);
if (isCatalogTable(tableName)) {
throw new IOException("Can't modify catalog tables");
}
if (!MetaReader.tableExists(getCatalogTracker(), tableNameStr)) {
throw new TableNotFoundException(tableNameStr);
if (!MetaReader.tableExists(getCatalogTracker(), tableName)) {
throw new TableNotFoundException(tableName);
}
if (!getAssignmentManager().getZKTable().
isDisabledTable(Bytes.toString(tableName))) {
isDisabledTable(tableName)) {
throw new TableNotDisabledException(tableName);
}
}
@ -2436,11 +2571,14 @@ MasterServices, Server {
public GetTableDescriptorsResponse getTableDescriptors(
RpcController controller, GetTableDescriptorsRequest req) throws ServiceException {
List<HTableDescriptor> descriptors = new ArrayList<HTableDescriptor>();
List<TableName> tableNameList = new ArrayList<TableName>();
for(HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
}
boolean bypass = false;
if (this.cpHost != null) {
try {
bypass = this.cpHost.preGetTableDescriptors(req.getTableNamesList(), descriptors);
bypass = this.cpHost.preGetTableDescriptors(tableNameList, descriptors);
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
@ -2456,10 +2594,14 @@ MasterServices, Server {
LOG.warn("Failed getting all descriptors", e);
}
if (descriptorMap != null) {
descriptors.addAll(descriptorMap.values());
for(HTableDescriptor desc: descriptorMap.values()) {
if(!HTableDescriptor.isSystemTable(desc.getTableName())) {
descriptors.add(desc);
}
}
}
} else {
for (String s: req.getTableNamesList()) {
for (TableName s: tableNameList) {
try {
HTableDescriptor desc = this.tableDescriptors.get(s);
if (desc != null) {
@ -2809,9 +2951,136 @@ MasterServices, Server {
}
}
@Override
public MasterAdminProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller,
MasterAdminProtos.ModifyNamespaceRequest request) throws ServiceException {
try {
modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
return MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.CreateNamespaceResponse createNamespace(RpcController controller,
MasterAdminProtos.CreateNamespaceRequest request) throws ServiceException {
try {
createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
return MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.DeleteNamespaceResponse deleteNamespace(RpcController controller,
MasterAdminProtos.DeleteNamespaceRequest request) throws ServiceException {
try {
deleteNamespace(request.getNamespaceName());
return MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(
RpcController controller, MasterAdminProtos.GetNamespaceDescriptorRequest request)
throws ServiceException {
try {
return MasterAdminProtos.GetNamespaceDescriptorResponse.newBuilder()
.setNamespaceDescriptor(
ProtobufUtil.toProtoNamespaceDescriptor(getNamespaceDescriptor(request.getNamespaceName())))
.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(
RpcController controller, MasterAdminProtos.ListNamespaceDescriptorsRequest request)
throws ServiceException {
try {
MasterAdminProtos.ListNamespaceDescriptorsResponse.Builder response =
MasterAdminProtos.ListNamespaceDescriptorsResponse.newBuilder();
for(NamespaceDescriptor ns: listNamespaceDescriptors()) {
response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
}
return response.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace(
RpcController controller, MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request)
throws ServiceException {
try {
MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.Builder b =
MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.newBuilder();
for(HTableDescriptor htd: getTableDescriptorsByNamespace(request.getNamespaceName())) {
b.addTableSchema(htd.convert());
}
return b.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
private boolean isHealthCheckerConfigured() {
String healthScriptLocation = this.conf.get(HConstants.HEALTH_SCRIPT_LOC);
return org.apache.commons.lang.StringUtils.isNotBlank(healthScriptLocation);
}
public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
if (cpHost != null) {
if (cpHost.preCreateNamespace(descriptor)) {
return;
}
}
tableNamespaceManager.create(descriptor);
if (cpHost != null) {
cpHost.postCreateNamespace(descriptor);
}
}
public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
if (cpHost != null) {
if (cpHost.preModifyNamespace(descriptor)) {
return;
}
}
tableNamespaceManager.update(descriptor);
if (cpHost != null) {
cpHost.postModifyNamespace(descriptor);
}
}
public void deleteNamespace(String name) throws IOException {
if (cpHost != null) {
if (cpHost.preDeleteNamespace(name)) {
return;
}
}
tableNamespaceManager.remove(name);
if (cpHost != null) {
cpHost.postDeleteNamespace(name);
}
}
public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
return tableNamespaceManager.get(name);
}
public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
return Lists.newArrayList(tableNamespaceManager.list());
}
public List<HTableDescriptor> getTableDescriptorsByNamespace(String name) throws IOException {
return Lists.newArrayList(tableDescriptors.getByNamespace(name).values());
}
}
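
Aside: taken together with the namespace existence check added to createTable() above, client-side usage looks roughly like the sketch below. The admin handle, configuration, and names are hypothetical, and the NamespaceDescriptor.create(...).build() builder form is assumed from this patch's descriptor type:

HBaseAdmin admin = new HBaseAdmin(conf);   // conf: an HBaseConfiguration.create()
admin.createNamespace(NamespaceDescriptor.create("myns").build());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myns:mytable"));
htd.addFamily(new HColumnDescriptor("cf"));
admin.createTable(htd);   // would fail with ConstraintException if "myns" did not exist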

View File

@ -87,6 +87,124 @@ public class MasterCoprocessorHost
abortServer("master", masterServices, env, e);
}
boolean preCreateNamespace(NamespaceDescriptor ns)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((MasterObserver)env.getInstance()).preCreateNamespace(
ctx, ns);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
bypass |= ctx.shouldBypass();
if (ctx.shouldComplete()) {
break;
}
}
}
return bypass;
}
void postCreateNamespace(NamespaceDescriptor ns)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((MasterObserver)env.getInstance()).postCreateNamespace(ctx, ns);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
boolean preDeleteNamespace(String namespaceName) throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((MasterObserver)env.getInstance()).preDeleteNamespace(
ctx, namespaceName);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
bypass |= ctx.shouldBypass();
if (ctx.shouldComplete()) {
break;
}
}
}
return bypass;
}
void postDeleteNamespace(String namespaceName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((MasterObserver)env.getInstance()).postDeleteNamespace(ctx, namespaceName);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
boolean preModifyNamespace(NamespaceDescriptor ns)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((MasterObserver)env.getInstance()).preModifyNamespace(
ctx, ns);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
bypass |= ctx.shouldBypass();
if (ctx.shouldComplete()) {
break;
}
}
}
return bypass;
}
void postModifyNamespace(NamespaceDescriptor ns)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((MasterObserver)env.getInstance()).postModifyNamespace(ctx, ns);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
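
Aside: for illustration, a minimal MasterObserver that these new host hooks would invoke. The class and its policy are hypothetical, and BaseMasterObserver is assumed to supply no-op defaults for the remaining hooks:

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class NamespacePolicyObserver extends BaseMasterObserver {
  @Override
  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor ns) throws IOException {
    // Hypothetical policy: reserve namespace names starting with "tmp".
    if (ns.getName().startsWith("tmp")) {
      throw new IOException("Namespace names starting with 'tmp' are reserved");
    }
  }
}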
/* Implementation of hooks for invoking MasterObservers */
public void preCreateTable(HTableDescriptor htd, HRegionInfo[] regions)
throws IOException {
@ -162,7 +280,7 @@ public class MasterCoprocessorHost
}
}
public void preDeleteTable(byte[] tableName) throws IOException {
public void preDeleteTable(TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -179,7 +297,7 @@ public class MasterCoprocessorHost
}
}
public void postDeleteTable(byte[] tableName) throws IOException {
public void postDeleteTable(TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -196,7 +314,7 @@ public class MasterCoprocessorHost
}
}
public void preDeleteTableHandler(byte[] tableName) throws IOException {
public void preDeleteTableHandler(TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -214,7 +332,7 @@ public class MasterCoprocessorHost
}
}
public void postDeleteTableHandler(byte[] tableName) throws IOException {
public void postDeleteTableHandler(TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -231,7 +349,7 @@ public class MasterCoprocessorHost
}
}
}
public void preModifyTable(final byte[] tableName, HTableDescriptor htd)
public void preModifyTable(final TableName tableName, HTableDescriptor htd)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
@ -250,7 +368,7 @@ public class MasterCoprocessorHost
}
}
public void postModifyTable(final byte[] tableName, HTableDescriptor htd)
public void postModifyTable(final TableName tableName, HTableDescriptor htd)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
@ -269,7 +387,7 @@ public class MasterCoprocessorHost
}
}
public void preModifyTableHandler(final byte[] tableName, HTableDescriptor htd)
public void preModifyTableHandler(final TableName tableName, HTableDescriptor htd)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
@ -288,7 +406,7 @@ public class MasterCoprocessorHost
}
}
public void postModifyTableHandler(final byte[] tableName,
public void postModifyTableHandler(final TableName tableName,
HTableDescriptor htd) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
@ -307,7 +425,7 @@ public class MasterCoprocessorHost
}
}
public boolean preAddColumn(byte [] tableName, HColumnDescriptor column)
public boolean preAddColumn(TableName tableName, HColumnDescriptor column)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
@ -328,7 +446,7 @@ public class MasterCoprocessorHost
return bypass;
}
public void postAddColumn(byte [] tableName, HColumnDescriptor column)
public void postAddColumn(TableName tableName, HColumnDescriptor column)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
@ -347,7 +465,7 @@ public class MasterCoprocessorHost
}
}
public boolean preAddColumnHandler(byte[] tableName, HColumnDescriptor column)
public boolean preAddColumnHandler(TableName tableName, HColumnDescriptor column)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
@ -369,7 +487,7 @@ public class MasterCoprocessorHost
return bypass;
}
public void postAddColumnHandler(byte[] tableName, HColumnDescriptor column)
public void postAddColumnHandler(TableName tableName, HColumnDescriptor column)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
@ -388,7 +506,7 @@ public class MasterCoprocessorHost
}
}
public boolean preModifyColumn(byte [] tableName, HColumnDescriptor descriptor)
public boolean preModifyColumn(TableName tableName, HColumnDescriptor descriptor)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
@ -410,7 +528,7 @@ public class MasterCoprocessorHost
return bypass;
}
public void postModifyColumn(byte [] tableName, HColumnDescriptor descriptor)
public void postModifyColumn(TableName tableName, HColumnDescriptor descriptor)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
@ -429,7 +547,7 @@ public class MasterCoprocessorHost
}
}
public boolean preModifyColumnHandler(byte[] tableName,
public boolean preModifyColumnHandler(TableName tableName,
HColumnDescriptor descriptor) throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
@ -451,7 +569,7 @@ public class MasterCoprocessorHost
return bypass;
}
public void postModifyColumnHandler(byte[] tableName,
public void postModifyColumnHandler(TableName tableName,
HColumnDescriptor descriptor) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
@ -470,7 +588,7 @@ public class MasterCoprocessorHost
}
}
boolean preDeleteColumn(final byte [] tableName, final byte [] c)
boolean preDeleteColumn(final TableName tableName, final byte [] c)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
@ -491,7 +609,7 @@ public class MasterCoprocessorHost
return bypass;
}
public void postDeleteColumn(final byte [] tableName, final byte [] c)
public void postDeleteColumn(final TableName tableName, final byte [] c)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
@ -510,7 +628,7 @@ public class MasterCoprocessorHost
}
}
public boolean preDeleteColumnHandler(final byte[] tableName, final byte[] c)
public boolean preDeleteColumnHandler(final TableName tableName, final byte[] c)
throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
@ -532,7 +650,7 @@ public class MasterCoprocessorHost
return bypass;
}
public void postDeleteColumnHandler(final byte[] tableName, final byte[] c)
public void postDeleteColumnHandler(final TableName tableName, final byte[] c)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
@ -551,7 +669,7 @@ public class MasterCoprocessorHost
}
}
public void preEnableTable(final byte [] tableName) throws IOException {
public void preEnableTable(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -568,7 +686,7 @@ public class MasterCoprocessorHost
}
}
public void postEnableTable(final byte [] tableName) throws IOException {
public void postEnableTable(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -585,7 +703,7 @@ public class MasterCoprocessorHost
}
}
public void preEnableTableHandler(final byte[] tableName) throws IOException {
public void preEnableTableHandler(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -603,7 +721,7 @@ public class MasterCoprocessorHost
}
}
public void postEnableTableHandler(final byte[] tableName) throws IOException {
public void postEnableTableHandler(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -621,7 +739,7 @@ public class MasterCoprocessorHost
}
}
public void preDisableTable(final byte [] tableName) throws IOException {
public void preDisableTable(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -638,7 +756,7 @@ public class MasterCoprocessorHost
}
}
public void postDisableTable(final byte [] tableName) throws IOException {
public void postDisableTable(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env: coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -655,7 +773,7 @@ public class MasterCoprocessorHost
}
}
public void preDisableTableHandler(final byte[] tableName) throws IOException {
public void preDisableTableHandler(final TableName tableName) throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
if (env.getInstance() instanceof MasterObserver) {
@ -673,7 +791,7 @@ public class MasterCoprocessorHost
}
}
public void postDisableTableHandler(final byte[] tableName)
public void postDisableTableHandler(final TableName tableName)
throws IOException {
ObserverContext<MasterCoprocessorEnvironment> ctx = null;
for (MasterEnvironment env : coprocessors) {
@ -1114,7 +1232,7 @@ public class MasterCoprocessorHost
}
}
public boolean preGetTableDescriptors(final List<String> tableNamesList,
public boolean preGetTableDescriptors(final List<TableName> tableNamesList,
final List<HTableDescriptor> descriptors) throws IOException {
boolean bypass = false;
ObserverContext<MasterCoprocessorEnvironment> ctx = null;

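The namespace hooks above follow the host's standard dispatch pattern: each registered MasterObserver is offered the event, the pre-hooks accumulate a bypass flag from the observer context, and iteration stops early once an observer marks the context complete. As a rough sketch, an observer wired into these hooks might look like the following; extending BaseMasterObserver is an assumption here, while the override signatures mirror the dispatch calls in this hunk.

import java.io.IOException;

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer; not part of this commit.
public class NamespaceAuditObserver extends BaseMasterObserver {
  @Override
  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor ns) throws IOException {
    // Throwing aborts the operation; calling ctx.bypass() instead would set
    // the bypass flag that preCreateNamespace() accumulates above.
    if (ns.getName().contains(":")) {
      throw new IOException("Illegal namespace name: " + ns.getName());
    }
  }

  @Override
  public void postCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor ns) throws IOException {
    // Runs only after the master has persisted the namespace.
  }
}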
View File

@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@ -529,29 +530,8 @@ public class MasterFileSystem {
HFileArchiver.archiveRegion(conf, fs, region);
}
public void deleteTable(byte[] tableName) throws IOException {
fs.delete(new Path(rootdir, Bytes.toString(tableName)), true);
}
/**
* Move the specified file/directory to the hbase temp directory.
* @param path The path of the file/directory to move
* @return The temp location of the file/directory moved
* @throws IOException in case of file-system failure
*/
public Path moveToTemp(final Path path) throws IOException {
Path tempPath = new Path(this.tempdir, path.getName());
// Ensure temp exists
if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
}
if (!fs.rename(path, tempPath)) {
throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
}
return tempPath;
public void deleteTable(TableName tableName) throws IOException {
fs.delete(FSUtils.getTableDir(rootdir, tableName), true);
}
/**
@ -560,8 +540,20 @@ public class MasterFileSystem {
* @return The temp location of the table moved
* @throws IOException in case of file-system failure
*/
public Path moveTableToTemp(byte[] tableName) throws IOException {
return moveToTemp(HTableDescriptor.getTableDir(this.rootdir, tableName));
public Path moveTableToTemp(TableName tableName) throws IOException {
Path srcPath = FSUtils.getTableDir(rootdir, tableName);
Path tempPath = FSUtils.getTableDir(this.tempdir, tableName);
// Ensure temp exists
if (!fs.exists(tempPath.getParent()) && !fs.mkdirs(tempPath.getParent())) {
throw new IOException("HBase temp directory '" + tempPath.getParent() + "' creation failure.");
}
if (!fs.rename(srcPath, tempPath)) {
throw new IOException("Unable to move '" + srcPath + "' to temp '" + tempPath + "'");
}
return tempPath;
}
public void updateRegionInfo(HRegionInfo region) {
@ -573,7 +565,7 @@ public class MasterFileSystem {
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
throws IOException {
// archive family store files
Path tableDir = new Path(rootdir, region.getTableNameAsString());
Path tableDir = FSUtils.getTableDir(rootdir, region.getTableName());
HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
// delete the family folder
@ -600,9 +592,9 @@ public class MasterFileSystem {
* @return Modified HTableDescriptor with requested column deleted.
* @throws IOException
*/
public HTableDescriptor deleteColumn(byte[] tableName, byte[] familyName)
public HTableDescriptor deleteColumn(TableName tableName, byte[] familyName)
throws IOException {
LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName)
LOG.info("DeleteColumn. Table = " + tableName
+ " family = " + Bytes.toString(familyName));
HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
htd.removeFamily(familyName);
@ -617,9 +609,9 @@ public class MasterFileSystem {
* @return Modified HTableDescriptor with the column modified.
* @throws IOException
*/
public HTableDescriptor modifyColumn(byte[] tableName, HColumnDescriptor hcd)
public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd)
throws IOException {
LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName)
LOG.info("AddModifyColumn. Table = " + tableName
+ " HCD = " + hcd.toString());
HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
@ -640,9 +632,9 @@ public class MasterFileSystem {
* @return Modified HTableDescriptor with new column added.
* @throws IOException
*/
public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd)
public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd)
throws IOException {
LOG.info("AddColumn. Table = " + Bytes.toString(tableName) + " HCD = " +
LOG.info("AddColumn. Table = " + tableName + " HCD = " +
hcd.toString());
HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
if (htd == null) {

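With moveTableToTemp now resolving both ends of the rename through FSUtils.getTableDir, table directories are namespace-qualified on both sides. A minimal sketch of the resulting path shapes; the literal paths in the comments are assumptions based on HConstants.BASE_NAMESPACE_DIR as used elsewhere in this diff.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class TableDirLayoutSketch {
  public static void main(String[] args) {
    Path rootdir = new Path("/hbase");
    Path tempdir = new Path(rootdir, ".tmp");
    TableName tn = TableName.valueOf("myns", "mytable");
    // Expected: /hbase/data/myns/mytable
    System.out.println(FSUtils.getTableDir(rootdir, tn));
    // Expected: /hbase/.tmp/data/myns/mytable
    System.out.println(FSUtils.getTableDir(tempdir, tn));
  }
}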
View File

@ -19,11 +19,14 @@
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableNotDisabledException;
@ -75,7 +78,7 @@ public interface MasterServices extends Server {
* @throws IOException
*/
// We actually throw the exceptions mentioned in the
void checkTableModifiable(final byte[] tableName)
void checkTableModifiable(final TableName tableName)
throws IOException, TableNotFoundException, TableNotDisabledException;
/**
@ -92,7 +95,7 @@ public interface MasterServices extends Server {
* @param tableName The table name
* @throws IOException
*/
void deleteTable(final byte[] tableName) throws IOException;
void deleteTable(final TableName tableName) throws IOException;
/**
* Modify the descriptor of an existing table
@ -100,7 +103,7 @@ public interface MasterServices extends Server {
* @param descriptor The updated table descriptor
* @throws IOException
*/
void modifyTable(final byte[] tableName, final HTableDescriptor descriptor)
void modifyTable(final TableName tableName, final HTableDescriptor descriptor)
throws IOException;
/**
@ -108,14 +111,15 @@ public interface MasterServices extends Server {
* @param tableName The table name
* @throws IOException
*/
void enableTable(final byte[] tableName) throws IOException;
void enableTable(final TableName tableName) throws IOException;
/**
* Disable an existing table
* @param tableName The table name
* @throws IOException
*/
void disableTable(final byte[] tableName) throws IOException;
void disableTable(final TableName tableName) throws IOException;
/**
* Add a new column to an existing table
@ -123,7 +127,7 @@ public interface MasterServices extends Server {
* @param column The column definition
* @throws IOException
*/
void addColumn(final byte[] tableName, final HColumnDescriptor column)
void addColumn(final TableName tableName, final HColumnDescriptor column)
throws IOException;
/**
@ -132,7 +136,7 @@ public interface MasterServices extends Server {
* @param descriptor The updated column definition
* @throws IOException
*/
void modifyColumn(byte[] tableName, HColumnDescriptor descriptor)
void modifyColumn(TableName tableName, HColumnDescriptor descriptor)
throws IOException;
/**
@ -141,7 +145,7 @@ public interface MasterServices extends Server {
* @param columnName The column name
* @throws IOException
*/
void deleteColumn(final byte[] tableName, final byte[] columnName)
void deleteColumn(final TableName tableName, final byte[] columnName)
throws IOException;
/**
@ -187,4 +191,47 @@ public interface MasterServices extends Server {
*/
boolean isInitialized();
/**
* Create a new namespace
* @param descriptor descriptor which describes the new namespace
* @throws IOException
*/
public void createNamespace(NamespaceDescriptor descriptor) throws IOException;
/**
* Modify an existing namespace
* @param descriptor descriptor which updates the existing namespace
* @throws IOException
*/
public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException;
/**
* Delete an existing namespace. Only empty namespaces (no tables) can be removed.
* @param name namespace name
* @throws IOException
*/
public void deleteNamespace(String name) throws IOException;
/**
* Get a namespace descriptor by name
* @param name name of namespace descriptor
* @return the descriptor for the named namespace
* @throws IOException
*/
public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException;
/**
* List available namespace descriptors
* @return all namespace descriptors known to the master
* @throws IOException
*/
public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException;
/**
* Get list of table descriptors by namespace
* @param name namespace name
* @return descriptors of all tables in the given namespace
* @throws IOException
*/
public List<HTableDescriptor> getTableDescriptorsByNamespace(String name) throws IOException;
}
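The namespace methods above form the master-side lifecycle that the coprocessor hooks earlier in this diff wrap. A minimal sketch of driving that lifecycle, assuming a MasterServices reference supplied by the master; the namespace name and configuration key are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;

public class NamespaceLifecycleSketch {
  static void demo(MasterServices services) throws IOException {
    services.createNamespace(NamespaceDescriptor.create("analytics").build());

    // Descriptors are replaced wholesale on modify.
    services.modifyNamespace(NamespaceDescriptor.create("analytics")
        .addConfiguration("owner", "reports-team")
        .build());

    // Per the contract above, only an empty namespace can be deleted.
    services.deleteNamespace("analytics");
  }
}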

View File

@ -0,0 +1,150 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import java.io.IOException;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* A janitor for namespace artifacts.
* Traverses HDFS and ZooKeeper to remove orphaned namespace directories and znodes.
*/
@InterfaceAudience.Private
public class NamespaceJanitor extends Chore {
private static final Log LOG = LogFactory.getLog(NamespaceJanitor.class.getName());
private final MasterServices services;
private AtomicBoolean enabled = new AtomicBoolean(true);
private AtomicBoolean alreadyRunning = new AtomicBoolean(false);
public NamespaceJanitor(final MasterServices services) {
super("NamespaceJanitor-" + services.getServerName().toShortString(),
services.getConfiguration().getInt("hbase.namespacejanitor.interval", 30000),
services);
this.services = services;
}
@Override
protected boolean initialChore() {
try {
if (this.enabled.get()) removeOrphans();
} catch (IOException e) {
LOG.warn("Failed NamespaceJanitor chore", e);
return false;
} catch (KeeperException e) {
LOG.warn("Failed NamespaceJanitor chore", e);
return false;
}
return true;
}
/**
* @param enabled whether the janitor should run its scans
* @return the previous enabled state
*/
public boolean setEnabled(final boolean enabled) {
return this.enabled.getAndSet(enabled);
}
boolean getEnabled() {
return this.enabled.get();
}
@Override
protected void chore() {
try {
if (this.enabled.get()) {
removeOrphans();
} else {
LOG.warn("NamespaceJanitor disabled! Not running scan.");
}
} catch (IOException e) {
LOG.warn("Failed NamespaceJanitor chore", e);
} catch (KeeperException e) {
LOG.warn("Failed NamespaceJanitor chore", e);
}
}
private void removeOrphans() throws IOException, KeeperException {
//cache the namespace names up front so we neither hold the master's
//namespace lock for long nor waste RPC calls
FileSystem fs = services.getMasterFileSystem().getFileSystem();
Set<String> descs = Sets.newHashSet();
for(NamespaceDescriptor ns : services.listNamespaceDescriptors()) {
descs.add(ns.getName());
}
//cleanup hdfs orphans
for (FileStatus nsStatus : FSUtils.listStatus(fs,
new Path(FSUtils.getRootDir(services.getConfiguration()), HConstants.BASE_NAMESPACE_DIR))) {
if (!descs.contains(nsStatus.getPath().getName()) &&
!NamespaceDescriptor.RESERVED_NAMESPACES.contains(nsStatus.getPath().getName())) {
boolean isEmpty = true;
for(FileStatus status : fs.listStatus(nsStatus.getPath())) {
if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
isEmpty = false;
break;
}
}
if(isEmpty) {
try {
if (fs.delete(nsStatus.getPath(), true)) {
LOG.debug("Removed namespace directory: " + nsStatus.getPath());
} else {
LOG.error("Failed to remove namespace directory: " + nsStatus.getPath());
}
} catch (IOException ex) {
LOG.error("Failed to remove namespace directory: " + nsStatus.getPath(), ex);
}
} else {
LOG.debug("Skipping non-empty namespace directory: " + nsStatus.getPath());
}
}
}
String baseZnode = ZooKeeperWatcher.namespaceZNode;
for(String child : ZKUtil.listChildrenNoWatch(services.getZooKeeper(), baseZnode)) {
if (!descs.contains(child) &&
!NamespaceDescriptor.RESERVED_NAMESPACES.contains(child)) {
String znode = ZKUtil.joinZNode(baseZnode, child);
try {
ZKUtil.deleteNode(services.getZooKeeper(), znode);
LOG.debug("Removed namespace znode: " + znode);
} catch (KeeperException ex) {
LOG.debug("Failed to remove namespace znode: " + znode, ex);
}
}
}
}
}
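Because setEnabled is backed by getAndSet, it hands back the previous state, which lets a caller pause the janitor around maintenance work and restore whatever setting was in effect. A minimal sketch, assuming access to the master's janitor instance:

import org.apache.hadoop.hbase.master.NamespaceJanitor;

public class JanitorPauseSketch {
  static void withJanitorPaused(NamespaceJanitor janitor, Runnable work) {
    boolean prev = janitor.setEnabled(false); // returns the previous state
    try {
      work.run();
    } finally {
      janitor.setEnabled(prev);
    }
  }
}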

View File

@ -29,6 +29,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
@ -417,13 +418,13 @@ public class RegionStates {
* @param tableName
* @return Online regions from <code>tableName</code>
*/
public synchronized List<HRegionInfo> getRegionsOfTable(byte[] tableName) {
public synchronized List<HRegionInfo> getRegionsOfTable(TableName tableName) {
List<HRegionInfo> tableRegions = new ArrayList<HRegionInfo>();
// boundary needs to have table's name but regionID 0 so that it is sorted
// before all table's regions.
HRegionInfo boundary = new HRegionInfo(tableName, null, null, false, 0L);
for (HRegionInfo hri: regionAssignments.tailMap(boundary).keySet()) {
if(!Bytes.equals(hri.getTableName(), tableName)) break;
if(!hri.getTableName().equals(tableName)) break;
tableRegions.add(hri);
}
return tableRegions;
@ -503,9 +504,10 @@ public class RegionStates {
*
* @return A clone of current assignments by table.
*/
protected Map<String, Map<ServerName, List<HRegionInfo>>> getAssignmentsByTable() {
Map<String, Map<ServerName, List<HRegionInfo>>> result =
new HashMap<String, Map<ServerName,List<HRegionInfo>>>();
protected Map<TableName, Map<ServerName, List<HRegionInfo>>>
getAssignmentsByTable() {
Map<TableName, Map<ServerName, List<HRegionInfo>>> result =
new HashMap<TableName, Map<ServerName,List<HRegionInfo>>>();
synchronized (this) {
if (!server.getConfiguration().getBoolean("hbase.master.loadbalance.bytable", false)) {
Map<ServerName, List<HRegionInfo>> svrToRegions =
@ -513,12 +515,12 @@ public class RegionStates {
for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
svrToRegions.put(e.getKey(), new ArrayList<HRegionInfo>(e.getValue()));
}
result.put("ensemble", svrToRegions);
result.put(TableName.valueOf("ensemble"), svrToRegions);
} else {
for (Map.Entry<ServerName, Set<HRegionInfo>> e: serverHoldings.entrySet()) {
for (HRegionInfo hri: e.getValue()) {
if (hri.isMetaRegion()) continue;
String tablename = hri.getTableNameAsString();
TableName tablename = hri.getTableName();
Map<ServerName, List<HRegionInfo>> svrToRegions = result.get(tablename);
if (svrToRegions == null) {
svrToRegions = new HashMap<ServerName, List<HRegionInfo>>(serverHoldings.size());

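getAssignmentsByTable is now keyed by TableName rather than String, and when hbase.master.loadbalance.bytable is false all regions are grouped under the single pseudo-table "ensemble". A sketch of consuming the snapshot; it is placed in the same package only so the protected method is visible.

package org.apache.hadoop.hbase.master;

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;

public class AssignmentSnapshotSketch {
  static void dump(RegionStates states) {
    Map<TableName, Map<ServerName, List<HRegionInfo>>> byTable =
        states.getAssignmentsByTable();
    for (Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> e : byTable.entrySet()) {
      // With by-table balancing off, the only key is TableName.valueOf("ensemble").
      System.out.println(e.getKey() + " -> " + e.getValue().size() + " servers");
    }
  }
}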
View File

@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.InterProcessLock;
import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
import org.apache.hadoop.hbase.InterProcessReadWriteLock;
@ -41,7 +42,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hbase.zookeeper.lock.ZKInterProcessReadWriteLock;
import org.apache.zookeeper.KeeperException;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
/**
@ -104,7 +104,7 @@ public abstract class TableLockManager {
* @param purpose Human readable reason for locking the table
* @return A new TableLock object for acquiring a write lock
*/
public abstract TableLock writeLock(byte[] tableName, String purpose);
public abstract TableLock writeLock(TableName tableName, String purpose);
/**
* Returns a TableLock for locking the table for shared access among read-lock holders
@ -112,7 +112,7 @@ public abstract class TableLockManager {
* @param purpose Human readable reason for locking the table
* @return A new TableLock object for acquiring a read lock
*/
public abstract TableLock readLock(byte[] tableName, String purpose);
public abstract TableLock readLock(TableName tableName, String purpose);
/**
* Visits all table locks(read and write), and lock attempts with the given callback
@ -148,7 +148,7 @@ public abstract class TableLockManager {
* @param tableName name of the table
* @throws IOException If there is an unrecoverable error releasing the lock
*/
public abstract void tableDeleted(byte[] tableName)
public abstract void tableDeleted(TableName tableName)
throws IOException;
/**
@ -186,11 +186,11 @@ public abstract class TableLockManager {
}
}
@Override
public TableLock writeLock(byte[] tableName, String purpose) {
public TableLock writeLock(TableName tableName, String purpose) {
return new NullTableLock();
}
@Override
public TableLock readLock(byte[] tableName, String purpose) {
public TableLock readLock(TableName tableName, String purpose) {
return new NullTableLock();
}
@Override
@ -200,7 +200,7 @@ public abstract class TableLockManager {
public void reapWriteLocks() throws IOException {
}
@Override
public void tableDeleted(byte[] tableName) throws IOException {
public void tableDeleted(TableName tableName) throws IOException {
}
@Override
public void visitAllLocks(MetadataHandler handler) throws IOException {
@ -249,18 +249,16 @@ public abstract class TableLockManager {
private static class TableLockImpl implements TableLock {
long lockTimeoutMs;
byte[] tableName;
String tableNameStr;
TableName tableName;
InterProcessLock lock;
boolean isShared;
ZooKeeperWatcher zkWatcher;
ServerName serverName;
String purpose;
public TableLockImpl(byte[] tableName, ZooKeeperWatcher zkWatcher,
public TableLockImpl(TableName tableName, ZooKeeperWatcher zkWatcher,
ServerName serverName, long lockTimeoutMs, boolean isShared, String purpose) {
this.tableName = tableName;
tableNameStr = Bytes.toString(tableName);
this.zkWatcher = zkWatcher;
this.serverName = serverName;
this.lockTimeoutMs = lockTimeoutMs;
@ -272,7 +270,7 @@ public abstract class TableLockManager {
public void acquire() throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Attempt to acquire table " + (isShared ? "read" : "write") +
" lock on: " + tableNameStr + " for:" + purpose);
" lock on: " + tableName + " for:" + purpose);
}
lock = createTableLock();
@ -283,47 +281,48 @@ public abstract class TableLockManager {
} else {
if (!lock.tryAcquire(lockTimeoutMs)) {
throw new LockTimeoutException("Timed out acquiring " +
(isShared ? "read" : "write") + "lock for table:" + tableNameStr +
(isShared ? "read" : "write") + " lock for table: " + tableName +
" for: " + purpose + " after " + lockTimeoutMs + " ms.");
}
}
} catch (InterruptedException e) {
LOG.warn("Interrupted acquiring a lock for " + tableNameStr, e);
LOG.warn("Interrupted acquiring a lock for " + tableName, e);
Thread.currentThread().interrupt();
throw new InterruptedIOException("Interrupted acquiring a lock");
}
if (LOG.isTraceEnabled()) LOG.trace("Acquired table " + (isShared ? "read" : "write")
+ " lock on " + tableNameStr + " for " + purpose);
+ " lock on " + tableName + " for " + purpose);
}
@Override
public void release() throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Attempt to release table " + (isShared ? "read" : "write")
+ " lock on " + tableNameStr);
+ " lock on " + tableName);
}
if (lock == null) {
throw new IllegalStateException("Table " + tableNameStr +
throw new IllegalStateException("Table " + tableName +
" is not locked!");
}
try {
lock.release();
} catch (InterruptedException e) {
LOG.warn("Interrupted while releasing a lock for " + tableNameStr);
LOG.warn("Interrupted while releasing a lock for " + tableName);
Thread.currentThread().interrupt();
throw new InterruptedIOException();
}
if (LOG.isTraceEnabled()) {
LOG.trace("Released table lock on " + tableNameStr);
LOG.trace("Released table lock on " + tableName);
}
}
private InterProcessLock createTableLock() {
String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, tableNameStr);
String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode,
tableName.getNameAsString());
ZooKeeperProtos.TableLock data = ZooKeeperProtos.TableLock.newBuilder()
.setTableName(ByteString.copyFrom(tableName))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
.setLockOwner(ProtobufUtil.toServerName(serverName))
.setThreadId(Thread.currentThread().getId())
.setPurpose(purpose)
@ -367,12 +366,12 @@ public abstract class TableLockManager {
}
@Override
public TableLock writeLock(byte[] tableName, String purpose) {
public TableLock writeLock(TableName tableName, String purpose) {
return new TableLockImpl(tableName, zkWatcher,
serverName, writeLockTimeoutMs, false, purpose);
}
public TableLock readLock(byte[] tableName, String purpose) {
public TableLock readLock(TableName tableName, String purpose) {
return new TableLockImpl(tableName, zkWatcher,
serverName, readLockTimeoutMs, true, purpose);
}
@ -435,9 +434,9 @@ public abstract class TableLockManager {
}
@Override
public void tableDeleted(byte[] tableName) throws IOException {
public void tableDeleted(TableName tableName) throws IOException {
//table write lock from DeleteHandler is already released, just delete the parent znode
String tableNameStr = Bytes.toString(tableName);
String tableNameStr = tableName.getNameAsString();
String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, tableNameStr);
try {
ZKUtil.deleteNode(zkWatcher, tableLockZNode);

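Table locks are acquired and released explicitly, so callers bracket the guarded work in try/finally. A minimal sketch against the TableName-based API above; the purpose string and table name are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;

public class TableLockSketch {
  static void withWriteLock(TableLockManager manager) throws IOException {
    TableLock lock = manager.writeLock(
        TableName.valueOf("myns", "mytable"), "schema change");
    lock.acquire();
    try {
      // ... mutate table state while holding the exclusive lock ...
    } finally {
      lock.release();
    }
  }
}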
View File

@ -0,0 +1,224 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.NavigableSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZKNamespaceManager;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.common.collect.Sets;
import org.apache.hadoop.hbase.util.FSUtils;
/**
* This is a helper class used to manage the namespace
* metadata that is stored in {@link TableName#NAMESPACE_TABLE_NAME}.
* It also mirrors updates to the ZK store by forwarding updates to
* {@link org.apache.hadoop.hbase.ZKNamespaceManager}.
*/
@InterfaceAudience.Private
public class TableNamespaceManager {
private static final Log LOG = LogFactory.getLog(TableNamespaceManager.class);
private Configuration conf;
private MasterServices masterServices;
private HTable table;
private ZKNamespaceManager zkNamespaceManager;
public TableNamespaceManager(MasterServices masterServices) throws IOException {
this.masterServices = masterServices;
this.conf = masterServices.getConfiguration();
}
public void start() throws IOException {
TableName tableName = TableName.NAMESPACE_TABLE_NAME;
try {
if (!MetaReader.tableExists(masterServices.getCatalogTracker(),
tableName)) {
LOG.info("Namespace table not found. Creating...");
createNamespaceTable(masterServices);
}
} catch (InterruptedException e) {
throw new IOException("Wait for namespace table assignment interrupted", e);
}
table = new HTable(conf, tableName);
zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper());
zkNamespaceManager.start();
if (get(NamespaceDescriptor.DEFAULT_NAMESPACE.getName()) == null) {
create(NamespaceDescriptor.DEFAULT_NAMESPACE);
}
if (get(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()) == null) {
create(NamespaceDescriptor.SYSTEM_NAMESPACE);
}
ResultScanner scanner = table.getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
try {
for(Result result : scanner) {
NamespaceDescriptor ns =
ProtobufUtil.toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor.parseFrom(
result.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue()));
zkNamespaceManager.update(ns);
}
} finally {
scanner.close();
}
}
public synchronized NamespaceDescriptor get(String name) throws IOException {
Result res = table.get(new Get(Bytes.toBytes(name)));
if (res.isEmpty()) {
return null;
}
return
ProtobufUtil.toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor.parseFrom(
res.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue()));
}
public synchronized void create(NamespaceDescriptor ns) throws IOException {
if (get(ns.getName()) != null) {
throw new ConstraintException("Namespace "+ns.getName()+" already exists");
}
FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
fs.mkdirs(FSUtils.getNamespaceDir(
masterServices.getMasterFileSystem().getRootDir(), ns.getName()));
upsert(ns);
}
public synchronized void update(NamespaceDescriptor ns) throws IOException {
if (get(ns.getName()) == null) {
throw new ConstraintException("Namespace "+ns.getName()+" does not exist");
}
upsert(ns);
}
private void upsert(NamespaceDescriptor ns) throws IOException {
Put p = new Put(Bytes.toBytes(ns.getName()));
p.add(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
HTableDescriptor.NAMESPACE_COL_DESC_BYTES,
ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
table.put(p);
try {
zkNamespaceManager.update(ns);
} catch(IOException ex) {
String msg = "Failed to update namespace information in ZK. Aborting.";
LOG.fatal(msg, ex);
masterServices.abort(msg, ex);
}
}
public synchronized void remove(String name) throws IOException {
if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(name)) {
throw new ConstraintException("Reserved namespace "+name+" cannot be removed.");
}
int tableCount = masterServices.getTableDescriptorsByNamespace(name).size();
if (tableCount > 0) {
throw new ConstraintException("Only empty namespaces can be removed. " +
"Namespace "+name+" has "+tableCount+" tables");
}
Delete d = new Delete(Bytes.toBytes(name));
table.delete(d);
//don't abort if ZK cleanup is incomplete;
//the znode will be replaced on the next namespace creation anyway
zkNamespaceManager.remove(name);
FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
for(FileStatus status :
fs.listStatus(FSUtils.getNamespaceDir(
masterServices.getMasterFileSystem().getRootDir(), name))) {
if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
throw new IOException("Namespace directory contains table dir: "+status.getPath());
}
}
if (!fs.delete(FSUtils.getNamespaceDir(
masterServices.getMasterFileSystem().getRootDir(), name), true)) {
throw new IOException("Failed to remove namespace: "+name);
}
}
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
NavigableSet<NamespaceDescriptor> ret =
Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
ResultScanner scanner = table.getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
try {
for(Result r : scanner) {
ret.add(ProtobufUtil.toNamespaceDescriptor(
HBaseProtos.NamespaceDescriptor.parseFrom(
r.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue())));
}
} finally {
scanner.close();
}
return ret;
}
private void createNamespaceTable(MasterServices masterServices) throws IOException, InterruptedException {
HRegionInfo newRegions[] = new HRegionInfo[]{
new HRegionInfo(HTableDescriptor.NAMESPACE_TABLEDESC.getTableName(), null, null)};
//we need to create the table this way to bypass
//checkInitialized
masterServices.getExecutorService()
.submit(new CreateTableHandler(masterServices,
masterServices.getMasterFileSystem(),
HTableDescriptor.NAMESPACE_TABLEDESC,
masterServices.getConfiguration(),
newRegions,
masterServices).prepare());
//wait for the region to come online; retries are 100ms apart, so the
//default of 600 allows roughly one minute
int tries = conf.getInt("hbase.master.namespace.init.timeout", 600);
while(masterServices.getAssignmentManager()
.getRegionStates().getRegionServerOfRegion(newRegions[0]) == null &&
tries > 0) {
Thread.sleep(100);
tries--;
}
if (tries <= 0) {
throw new IOException("Failed to create namespace table.");
}
}
}
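Each namespace is persisted as one row in the namespace table: the row key is the namespace name and the serialized protobuf descriptor sits in the info family. A sketch of reading a row directly, mirroring get() above; real callers would go through this manager rather than the table.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class NamespaceRowReadSketch {
  static void read(Configuration conf, String name) throws java.io.IOException {
    HTable table = new HTable(conf, TableName.NAMESPACE_TABLE_NAME);
    try {
      Result res = table.get(new Get(Bytes.toBytes(name)));
      if (!res.isEmpty()) {
        System.out.println(ProtobufUtil.toNamespaceDescriptor(
            HBaseProtos.NamespaceDescriptor.parseFrom(
                res.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
                    HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue())));
      }
    } finally {
      table.close();
    }
  }
}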

View File

@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.common.base.Joiner;
import com.google.common.collect.ArrayListMultimap;
@ -145,7 +144,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
regionPerServerIndex = 0;
for (HRegionInfo region : entry.getValue()) {
String tableName = region.getTableNameAsString();
String tableName = region.getTableName().getNameAsString();
Integer idx = tablesToIndex.get(tableName);
if (idx == null) {
tables.add(tableName);

View File

@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
@ -101,7 +102,7 @@ public class FavoredNodeAssignmentHelper {
* @throws IOException
*/
public static Map<HRegionInfo, ServerName> fullScan(
CatalogTracker catalogTracker, final Set<String> disabledTables,
CatalogTracker catalogTracker, final Set<TableName> disabledTables,
final boolean excludeOfflinedSplitParents,
FavoredNodeLoadBalancer balancer) throws IOException {
final Map<HRegionInfo, ServerName> regions =
@ -115,9 +116,9 @@ public class FavoredNodeAssignmentHelper {
Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
HRegionInfo hri = region.getFirst();
if (hri == null) return true;
if (hri.getTableNameAsString() == null) return true;
if (hri.getTableName() == null) return true;
if (disabledTables.contains(
hri.getTableNameAsString())) return true;
hri.getTableName())) return true;
// Are we to include split parents in the list?
if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
regions.put(hri, region.getSecond());

View File

@ -31,13 +31,13 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
@ -144,15 +144,15 @@ class RegionLocationFinder {
* @return HTableDescriptor
* @throws IOException
*/
protected HTableDescriptor getTableDescriptor(byte[] tableName) throws IOException {
protected HTableDescriptor getTableDescriptor(TableName tableName) throws IOException {
HTableDescriptor tableDescriptor = null;
try {
if (this.services != null) {
tableDescriptor = this.services.getTableDescriptors().get(Bytes.toString(tableName));
tableDescriptor = this.services.getTableDescriptors().get(tableName);
}
} catch (FileNotFoundException fnfe) {
LOG.debug("FileNotFoundException during getTableDescriptors." + " Current table name = "
+ Bytes.toStringBinary(tableName), fnfe);
+ tableName, fnfe);
}
return tableDescriptor;

Some files were not shown because too many files have changed in this diff.