HBASE-11862 Get rid of Writables in HTableDescriptor, HColumnDescriptor (Andrey Stepachev)

stack 2014-09-15 11:08:21 -07:00
parent 3cc5d19039
commit 5554692871
14 changed files with 324 additions and 467 deletions
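In short, the descriptor classes stop implementing WritableComparable and keep only protobuf serialization: readFields()/write() are deleted, and parseFrom()/toByteArray() become the sole (de)serialization path. Below is a minimal sketch of the resulting round trip, using only methods visible in the diff; the table and family names are illustrative.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class DescriptorPbRoundTrip {
  public static void main(String[] args) throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // Serialize: pb-magic-prefixed protobuf; there is no Writable write() anymore.
    byte[] pb = htd.toByteArray();
    // Deserialize: bytes lacking the pb magic prefix now fail with
    // DeserializationException instead of falling back to readFields().
    HTableDescriptor back = HTableDescriptor.parseFrom(pb);
    System.out.println(back.getTableName());
  }
}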

HColumnDescriptor.java

@@ -18,19 +18,17 @@
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.base.Preconditions;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -38,15 +36,10 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.PrettyPrinter;
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.util.ByteStringer;
import com.google.protobuf.InvalidProtocolBufferException;
/**
* An HColumnDescriptor contains information about a column family such as the
@@ -56,7 +49,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
// For future backward compatibility
// Version 3 was when column names became byte arrays and when we picked up
@@ -235,8 +228,9 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
private final static Map<String, String> DEFAULT_VALUES
= new HashMap<String, String>();
private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
= new HashSet<ImmutableBytesWritable>();
private final static Set<Bytes> RESERVED_KEYWORDS
= new HashSet<Bytes>();
static {
DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
@@ -256,10 +250,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
for (String s : DEFAULT_VALUES.keySet()) {
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
}
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION)));
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(ENCRYPTION_KEY)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
}
private static final int UNINITIALIZED = -1;
@@ -268,8 +262,8 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
private byte [] name;
// Column metadata
private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
private final Map<Bytes, Bytes> values =
new HashMap<Bytes, Bytes>();
/**
* A map which holds the configuration specific to the column family.
@@ -328,7 +322,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
public HColumnDescriptor(HColumnDescriptor desc) {
super();
this.name = desc.name.clone();
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e :
desc.values.entrySet()) {
this.values.put(e.getKey(), e.getValue());
}
@@ -522,7 +516,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @return The value.
*/
public byte[] getValue(byte[] key) {
ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
Bytes ibw = values.get(new Bytes(key));
if (ibw == null)
return null;
return ibw.get();
@@ -542,7 +536,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
/**
* @return All values.
*/
public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
public Map<Bytes, Bytes> getValues() {
// shallow pointer copy
return Collections.unmodifiableMap(values);
}
@@ -553,8 +547,8 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @return this (for chained invocation)
*/
public HColumnDescriptor setValue(byte[] key, byte[] value) {
values.put(new ImmutableBytesWritable(key),
new ImmutableBytesWritable(value));
values.put(new Bytes(key),
new Bytes(value));
return this;
}
@@ -562,7 +556,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @param key Key whose key and value we're to remove from HCD parameters.
*/
public void remove(final byte [] key) {
values.remove(new ImmutableBytesWritable(key));
values.remove(new Bytes(key));
}
/**
@@ -1022,7 +1016,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
boolean hasConfigKeys = false;
// print all reserved keys first
for (ImmutableBytesWritable k : values.keySet()) {
for (Bytes k : values.keySet()) {
if (!RESERVED_KEYWORDS.contains(k)) {
hasConfigKeys = true;
continue;
@@ -1045,7 +1039,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
s.append(HConstants.METADATA).append(" => ");
s.append('{');
boolean printComma = false;
for (ImmutableBytesWritable k : values.keySet()) {
for (Bytes k : values.keySet()) {
if (RESERVED_KEYWORDS.contains(k)) {
continue;
}
@@ -1123,111 +1117,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
return result;
}
/**
* @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead.
*/
@Deprecated
public void readFields(DataInput in) throws IOException {
int version = in.readByte();
if (version < 6) {
if (version <= 2) {
Text t = new Text();
t.readFields(in);
this.name = t.getBytes();
// if(KeyValue.getFamilyDelimiterIndex(this.name, 0, this.name.length)
// > 0) {
// this.name = stripColon(this.name);
// }
} else {
this.name = Bytes.readByteArray(in);
}
this.values.clear();
setMaxVersions(in.readInt());
int ordinal = in.readInt();
setCompressionType(Compression.Algorithm.values()[ordinal]);
setInMemory(in.readBoolean());
setBloomFilterType(in.readBoolean() ? BloomType.ROW : BloomType.NONE);
if (getBloomFilterType() != BloomType.NONE && version < 5) {
// If a bloomFilter is enabled and the column descriptor is less than
// version 5, we need to skip over it to read the rest of the column
// descriptor. There are no BloomFilterDescriptors written to disk for
// column descriptors with a version number >= 5
throw new UnsupportedClassVersionError(this.getClass().getName() +
" does not support backward compatibility with versions older " +
"than version 5");
}
if (version > 1) {
setBlockCacheEnabled(in.readBoolean());
}
if (version > 2) {
setTimeToLive(in.readInt());
}
} else {
// version 6+
this.name = Bytes.readByteArray(in);
this.values.clear();
int numValues = in.readInt();
for (int i = 0; i < numValues; i++) {
ImmutableBytesWritable key = new ImmutableBytesWritable();
ImmutableBytesWritable value = new ImmutableBytesWritable();
key.readFields(in);
value.readFields(in);
// in version 8, the BloomFilter setting changed from bool to enum
if (version < 8 && Bytes.toString(key.get()).equals(BLOOMFILTER)) {
value.set(Bytes.toBytes(
Boolean.getBoolean(Bytes.toString(value.get()))
? BloomType.ROW.toString()
: BloomType.NONE.toString()));
}
values.put(key, value);
}
if (version == 6) {
// Convert old values.
setValue(COMPRESSION, Compression.Algorithm.NONE.getName());
}
String value = getValue(HConstants.VERSIONS);
this.cachedMaxVersions = (value != null)?
Integer.valueOf(value).intValue(): DEFAULT_VERSIONS;
if (version > 10) {
configuration.clear();
int numConfigs = in.readInt();
for (int i = 0; i < numConfigs; i++) {
ImmutableBytesWritable key = new ImmutableBytesWritable();
ImmutableBytesWritable val = new ImmutableBytesWritable();
key.readFields(in);
val.readFields(in);
configuration.put(
Bytes.toString(key.get(), key.getOffset(), key.getLength()),
Bytes.toString(val.get(), val.getOffset(), val.getLength()));
}
}
}
}
/**
* @deprecated Writables are going away. Use {@link #toByteArray()} instead.
*/
@Deprecated
public void write(DataOutput out) throws IOException {
out.writeByte(COLUMN_DESCRIPTOR_VERSION);
Bytes.writeByteArray(out, this.name);
out.writeInt(values.size());
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
values.entrySet()) {
e.getKey().write(out);
e.getValue().write(out);
}
out.writeInt(configuration.size());
for (Map.Entry<String, String> e : configuration.entrySet()) {
new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
}
}
// Comparable
public int compareTo(HColumnDescriptor o) {
int result = Bytes.compareTo(this.name, o.getName());
if (result == 0) {
@@ -1300,7 +1190,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
public ColumnFamilySchema convert() {
ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
builder.setName(ByteStringer.wrap(getName()));
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));

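With the map now keyed by org.apache.hadoop.hbase.util.Bytes, callers still hand HColumnDescriptor plain byte arrays; the wrapping shown above happens internally. A short sketch (the attribute name and value are made up for illustration):

HColumnDescriptor hcd = new HColumnDescriptor("cf");
// setValue stores the pair under new Bytes(key), per the diff above.
hcd.setValue(Bytes.toBytes("MY_ATTRIBUTE"), Bytes.toBytes("some-value"));
// getValue builds the same Bytes key for the lookup and unwraps the result.
byte[] v = hcd.getValue(Bytes.toBytes("MY_ATTRIBUTE")); // => "some-value"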
HRegionInfo.java

@@ -80,30 +80,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HRegionInfo implements Comparable<HRegionInfo> {
/*
* There are two versions associated with HRegionInfo: HRegionInfo.VERSION and
* HConstants.META_VERSION. HRegionInfo.VERSION indicates the data structure's versioning
* while HConstants.META_VERSION indicates the versioning of the serialized HRIs stored in
* the hbase:meta table.
*
* Pre-0.92:
* HRI.VERSION == 0 and HConstants.META_VERSION does not exist (is not stored at hbase:meta table)
* HRegionInfo had an HTableDescriptor reference inside it.
* HRegionInfo is serialized as Writable to hbase:meta table.
* For 0.92.x and 0.94.x:
* HRI.VERSION == 1 and HConstants.META_VERSION == 0
* HRI no longer has HTableDescriptor in it.
* HRI is serialized as Writable to hbase:meta table.
* For 0.96.x:
* HRI.VERSION == 1 and HConstants.META_VERSION == 1
* HRI data structure is the same as 0.92 and 0.94
* HRI is serialized as PB to hbase:meta table.
*
* Versioning of HRegionInfo is deprecated. HRegionInfo does protobuf
* serialization using RegionInfo class, which has its own versioning.
*/
@Deprecated
public static final byte VERSION = 1;
private static final Log LOG = LogFactory.getLog(HRegionInfo.class);
/**
@@ -829,86 +806,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
return this.hashCode;
}
/** @return the object version number
* @deprecated HRI is no longer a VersionedWritable */
@Deprecated
public byte getVersion() {
return VERSION;
}
/**
* @deprecated Use protobuf serialization instead. See {@link #toByteArray()} and
* {@link #toDelimitedByteArray()}
*/
@Deprecated
public void write(DataOutput out) throws IOException {
out.writeByte(getVersion());
Bytes.writeByteArray(out, endKey);
out.writeBoolean(offLine);
out.writeLong(regionId);
Bytes.writeByteArray(out, regionName);
out.writeBoolean(split);
Bytes.writeByteArray(out, startKey);
Bytes.writeByteArray(out, tableName.getName());
out.writeInt(hashCode);
}
/**
* @deprecated Use protobuf deserialization instead.
* @see #parseFrom(byte[])
*/
@Deprecated
public void readFields(DataInput in) throws IOException {
// Read the single version byte. We don't ask the super class to do it
// because it freaks out if it's not the current class's version. This method
// can deserialize version 0 and version 1 of HRI.
byte version = in.readByte();
if (version == 0) {
// This is the old HRI that carried an HTD. Migrate it. The below
// was copied from the old 0.90 HRI readFields.
this.endKey = Bytes.readByteArray(in);
this.offLine = in.readBoolean();
this.regionId = in.readLong();
this.regionName = Bytes.readByteArray(in);
this.split = in.readBoolean();
this.startKey = Bytes.readByteArray(in);
try {
HTableDescriptor htd = new HTableDescriptor();
htd.readFields(in);
this.tableName = htd.getTableName();
} catch(EOFException eofe) {
throw new IOException("HTD not found in input buffer", eofe);
}
this.hashCode = in.readInt();
} else if (getVersion() == version) {
this.endKey = Bytes.readByteArray(in);
this.offLine = in.readBoolean();
this.regionId = in.readLong();
this.regionName = Bytes.readByteArray(in);
this.split = in.readBoolean();
this.startKey = Bytes.readByteArray(in);
this.tableName = TableName.valueOf(Bytes.readByteArray(in));
this.hashCode = in.readInt();
} else {
throw new IOException("Non-migratable/unknown version=" + getVersion());
}
}
@Deprecated
private void readFields(byte[] bytes, int offset, int len) throws IOException {
if (bytes == null || len <= 0) {
throw new IllegalArgumentException("Can't build a writable with empty " +
"bytes array");
}
DataInputBuffer in = new DataInputBuffer();
try {
in.reset(bytes, offset, len);
this.readFields(in);
} finally {
in.close();
}
}
//
// Comparable
//
@@ -1106,13 +1003,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
throw new DeserializationException(e);
}
} else {
try {
HRegionInfo hri = new HRegionInfo();
hri.readFields(bytes, offset, len);
return hri;
} catch (IOException e) {
throw new DeserializationException(e);
}
throw new DeserializationException("PB encoded HRegionInfo expected");
}
}
@@ -1354,25 +1245,12 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
if (in.markSupported()) { //read it with mark()
in.mark(pblen);
}
int read = in.read(pbuf); //assumption: if Writable serialization, it should be longer than pblen.
int read = in.read(pbuf); //assumption: it should be longer than pblen.
if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in));
} else {
// Presume Writables. Need to reset the stream since it didn't start w/ pb.
if (in.markSupported()) {
in.reset();
HRegionInfo hri = new HRegionInfo();
hri.readFields(in);
return hri;
} else {
//we cannot use BufferedInputStream, it consumes more than we read from the underlying IS
ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
SequenceInputStream sis = new SequenceInputStream(bais, in); //concatenate input streams
HRegionInfo hri = new HRegionInfo();
hri.readFields(new DataInputStream(sis));
return hri;
}
throw new IOException("PB encoded HRegionInfo expected");
}
}

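With the Writable fallback removed, only protobuf-encoded bytes deserialize into an HRegionInfo. A sketch of the new contract (the table name is illustrative):

HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo"));
byte[] pb = hri.toByteArray();                 // pb magic + RegionInfo message
HRegionInfo back = HRegionInfo.parseFrom(pb);  // round-trips fine
// Pre-0.96 Writable-serialized bytes now throw
// DeserializationException("PB encoded HRegionInfo expected") instead of
// being migrated via readFields().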
HTableDescriptor.java

@@ -18,8 +18,7 @@
*/
package org.apache.hadoop.hbase;
import java.io.DataInput;
import java.io.DataOutput;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -34,7 +33,7 @@ import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;
import org.apache.hadoop.hbase.util.ByteStringer;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -42,7 +41,6 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
@@ -50,11 +48,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.WritableComparable;
import com.google.protobuf.InvalidProtocolBufferException;
/**
* HTableDescriptor contains the details about an HBase table such as the descriptors of
@@ -64,20 +59,10 @@ import com.google.protobuf.InvalidProtocolBufferException;
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public class HTableDescriptor implements Comparable<HTableDescriptor> {
private static final Log LOG = LogFactory.getLog(HTableDescriptor.class);
/**
* Changes prior to version 3 were not recorded here.
* Version 3 adds metadata as a map where keys and values are byte[].
* Version 4 adds indexes
* Version 5 removed transactional pollution -- e.g. indexes
* Version 6 changed metadata to BytesBytesPair in PB
* Version 7 adds table-level configuration
*/
private static final byte TABLE_DESCRIPTOR_VERSION = 7;
private TableName name = null;
/**
@@ -85,8 +70,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
* MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
*/
private final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
private final Map<Bytes, Bytes> values =
new HashMap<Bytes, Bytes>();
/**
* A map which holds the configuration specific to the table.
@@ -105,12 +90,12 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #getMaxFileSize()
*/
public static final String MAX_FILESIZE = "MAX_FILESIZE";
private static final ImmutableBytesWritable MAX_FILESIZE_KEY =
new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
private static final Bytes MAX_FILESIZE_KEY =
new Bytes(Bytes.toBytes(MAX_FILESIZE));
public static final String OWNER = "OWNER";
public static final ImmutableBytesWritable OWNER_KEY =
new ImmutableBytesWritable(Bytes.toBytes(OWNER));
public static final Bytes OWNER_KEY =
new Bytes(Bytes.toBytes(OWNER));
/**
* <em>INTERNAL</em> Used by rest interface to access this metadata
@@ -119,8 +104,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #isReadOnly()
*/
public static final String READONLY = "READONLY";
private static final ImmutableBytesWritable READONLY_KEY =
new ImmutableBytesWritable(Bytes.toBytes(READONLY));
private static final Bytes READONLY_KEY =
new Bytes(Bytes.toBytes(READONLY));
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
@@ -129,8 +114,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #isCompactionEnabled()
*/
public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
private static final ImmutableBytesWritable COMPACTION_ENABLED_KEY =
new ImmutableBytesWritable(Bytes.toBytes(COMPACTION_ENABLED));
private static final Bytes COMPACTION_ENABLED_KEY =
new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
@@ -140,8 +125,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #getMemStoreFlushSize()
*/
public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE";
private static final ImmutableBytesWritable MEMSTORE_FLUSHSIZE_KEY =
new ImmutableBytesWritable(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
private static final Bytes MEMSTORE_FLUSHSIZE_KEY =
new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE));
/**
* <em>INTERNAL</em> Used by rest interface to access this metadata
@@ -150,8 +135,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #isRootRegion()
*/
public static final String IS_ROOT = "IS_ROOT";
private static final ImmutableBytesWritable IS_ROOT_KEY =
new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
private static final Bytes IS_ROOT_KEY =
new Bytes(Bytes.toBytes(IS_ROOT));
/**
* <em>INTERNAL</em> Used by rest interface to access this metadata
@@ -161,8 +146,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #isMetaRegion()
*/
public static final String IS_META = "IS_META";
private static final ImmutableBytesWritable IS_META_KEY =
new ImmutableBytesWritable(Bytes.toBytes(IS_META));
private static final Bytes IS_META_KEY =
new Bytes(Bytes.toBytes(IS_META));
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
@@ -172,22 +157,22 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
@Deprecated
public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
@Deprecated
private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =
new ImmutableBytesWritable(Bytes.toBytes(DEFERRED_LOG_FLUSH));
private static final Bytes DEFERRED_LOG_FLUSH_KEY =
new Bytes(Bytes.toBytes(DEFERRED_LOG_FLUSH));
/**
* <em>INTERNAL</em> {@link Durability} setting for the table.
*/
public static final String DURABILITY = "DURABILITY";
private static final ImmutableBytesWritable DURABILITY_KEY =
new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
private static final Bytes DURABILITY_KEY =
new Bytes(Bytes.toBytes("DURABILITY"));
/**
* <em>INTERNAL</em> number of region replicas for the table.
*/
public static final String REGION_REPLICATION = "REGION_REPLICATION";
private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));
private static final Bytes REGION_REPLICATION_KEY =
new Bytes(Bytes.toBytes(REGION_REPLICATION));
/** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
@@ -197,11 +182,11 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* replace booleans being saved as Strings with plain booleans. Need a
* migration script to do this. TODO.
*/
private static final ImmutableBytesWritable FALSE =
new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
private static final Bytes FALSE =
new Bytes(Bytes.toBytes(Boolean.FALSE.toString()));
private static final ImmutableBytesWritable TRUE =
new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
private static final Bytes TRUE =
new Bytes(Bytes.toBytes(Boolean.TRUE.toString()));
private static final boolean DEFAULT_DEFERRED_LOG_FLUSH = false;
@@ -225,8 +210,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
private final static Map<String, String> DEFAULT_VALUES
= new HashMap<String, String>();
private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
= new HashSet<ImmutableBytesWritable>();
private final static Set<Bytes> RESERVED_KEYWORDS
= new HashSet<Bytes>();
static {
DEFAULT_VALUES.put(MAX_FILESIZE,
String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
@@ -238,7 +224,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
for (String s : DEFAULT_VALUES.keySet()) {
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
}
RESERVED_KEYWORDS.add(IS_ROOT_KEY);
RESERVED_KEYWORDS.add(IS_META_KEY);
@@ -280,12 +266,12 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* catalog tables, <code>hbase:meta</code> and <code>-ROOT-</code>.
*/
protected HTableDescriptor(final TableName name, HColumnDescriptor[] families,
Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
Map<Bytes, Bytes> values) {
setName(name);
for(HColumnDescriptor descriptor : families) {
this.families.put(descriptor.getName(), descriptor);
}
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry:
for (Map.Entry<Bytes, Bytes> entry :
values.entrySet()) {
setValue(entry.getKey(), entry.getValue());
}
@@ -345,7 +331,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
for (HColumnDescriptor c: desc.families.values()) {
this.families.put(c.getName(), new HColumnDescriptor(c));
}
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e :
desc.values.entrySet()) {
setValue(e.getKey(), e.getValue());
}
@@ -409,7 +395,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
return (value != null)? Boolean.valueOf(Bytes.toString(value)): Boolean.FALSE;
}
private boolean isSomething(final ImmutableBytesWritable key,
private boolean isSomething(final Bytes key,
final boolean valueIfNull) {
byte [] value = getValue(key);
if (value != null) {
@@ -447,11 +433,11 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #values
*/
public byte[] getValue(byte[] key) {
return getValue(new ImmutableBytesWritable(key));
return getValue(new Bytes(key));
}
private byte[] getValue(final ImmutableBytesWritable key) {
ImmutableBytesWritable ibw = values.get(key);
private byte[] getValue(final Bytes key) {
Bytes ibw = values.get(key);
if (ibw == null)
return null;
return ibw.get();
@@ -477,7 +463,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @return unmodifiable map {@link #values}.
* @see #values
*/
public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
public Map<Bytes, Bytes> getValues() {
// shallow pointer copy
return Collections.unmodifiableMap(values);
}
@@ -490,16 +476,16 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #values
*/
public void setValue(byte[] key, byte[] value) {
setValue(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value));
setValue(new Bytes(key), new Bytes(value));
}
/*
* @param key The key.
* @param value The value.
*/
private void setValue(final ImmutableBytesWritable key,
private void setValue(final Bytes key,
final String value) {
setValue(key, new ImmutableBytesWritable(Bytes.toBytes(value)));
setValue(key, new Bytes(Bytes.toBytes(value)));
}
/*
@@ -508,8 +494,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @param key The key.
* @param value The value.
*/
public void setValue(final ImmutableBytesWritable key,
final ImmutableBytesWritable value) {
public void setValue(final Bytes key,
final Bytes value) {
if (key.compareTo(DEFERRED_LOG_FLUSH_KEY) == 0) {
boolean isDeferredFlush = Boolean.valueOf(Bytes.toString(value.get()));
LOG.warn("HTableDescriptor property:" + DEFERRED_LOG_FLUSH + " is deprecated, " +
@@ -542,7 +528,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* parameters.
*/
public void remove(final String key) {
remove(new ImmutableBytesWritable(Bytes.toBytes(key)));
remove(new Bytes(Bytes.toBytes(key)));
}
/**
@@ -551,7 +537,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @param key Key whose key and value we're to remove from HTableDescriptor
* parameters.
*/
public void remove(ImmutableBytesWritable key) {
public void remove(Bytes key) {
values.remove(key);
}
@@ -562,7 +548,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* parameters.
*/
public void remove(final byte [] key) {
remove(new ImmutableBytesWritable(key));
remove(new Bytes(key));
}
/**
@@ -817,9 +803,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
StringBuilder s = new StringBuilder();
// step 1: set partitioning and pruning
Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
Set<ImmutableBytesWritable> userKeys = new TreeSet<ImmutableBytesWritable>();
for (ImmutableBytesWritable k : values.keySet()) {
Set<Bytes> reservedKeys = new TreeSet<Bytes>();
Set<Bytes> userKeys = new TreeSet<Bytes>();
for (Bytes k : values.keySet()) {
if (k == null || k.get() == null) continue;
String key = Bytes.toString(k.get());
// in this section, print out reserved keywords + coprocessor info
@@ -851,7 +837,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
// print all reserved keys first
boolean printCommaForAttr = false;
for (ImmutableBytesWritable k : reservedKeys) {
for (Bytes k : reservedKeys) {
String key = Bytes.toString(k.get());
String value = Bytes.toStringBinary(values.get(k).get());
if (printCommaForAttr) s.append(", ");
@@ -868,7 +854,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
s.append(HConstants.METADATA).append(" => ");
s.append("{");
boolean printCommaForCfg = false;
for (ImmutableBytesWritable k : userKeys) {
for (Bytes k : userKeys) {
String key = Bytes.toString(k.get());
String value = Bytes.toStringBinary(values.get(k).get());
if (printCommaForCfg) s.append(", ");
@@ -931,8 +917,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
@Override
public int hashCode() {
int result = this.name.hashCode();
result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode();
if (this.families != null && this.families.size() > 0) {
if (this.families.size() > 0) {
for (HColumnDescriptor e: this.families.values()) {
result ^= e.hashCode();
}
@@ -942,84 +927,6 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
return result;
}
/**
* <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
* and is used for de-serialization of the HTableDescriptor over RPC
* @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead.
*/
@Deprecated
@Override
public void readFields(DataInput in) throws IOException {
int version = in.readInt();
if (version < 3)
throw new IOException("versions < 3 are not supported (and never existed!?)");
// version 3+
name = TableName.valueOf(Bytes.readByteArray(in));
setRootRegion(in.readBoolean());
setMetaRegion(in.readBoolean());
values.clear();
configuration.clear();
int numVals = in.readInt();
for (int i = 0; i < numVals; i++) {
ImmutableBytesWritable key = new ImmutableBytesWritable();
ImmutableBytesWritable value = new ImmutableBytesWritable();
key.readFields(in);
value.readFields(in);
setValue(key, value);
}
families.clear();
int numFamilies = in.readInt();
for (int i = 0; i < numFamilies; i++) {
HColumnDescriptor c = new HColumnDescriptor();
c.readFields(in);
families.put(c.getName(), c);
}
if (version >= 7) {
int numConfigs = in.readInt();
for (int i = 0; i < numConfigs; i++) {
ImmutableBytesWritable key = new ImmutableBytesWritable();
ImmutableBytesWritable value = new ImmutableBytesWritable();
key.readFields(in);
value.readFields(in);
configuration.put(
Bytes.toString(key.get(), key.getOffset(), key.getLength()),
Bytes.toString(value.get(), value.getOffset(), value.getLength()));
}
}
}
/**
* <em> INTERNAL </em> This method is a part of {@link WritableComparable} interface
* and is used for serialization of the HTableDescriptor over RPC
* @deprecated Writables are going away.
* Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
*/
@Deprecated
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(TABLE_DESCRIPTOR_VERSION);
Bytes.writeByteArray(out, name.toBytes());
out.writeBoolean(isRootRegion());
out.writeBoolean(isMetaRegion());
out.writeInt(values.size());
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
values.entrySet()) {
e.getKey().write(out);
e.getValue().write(out);
}
out.writeInt(families.size());
for(Iterator<HColumnDescriptor> it = families.values().iterator();
it.hasNext(); ) {
HColumnDescriptor family = it.next();
family.write(out);
}
out.writeInt(configuration.size());
for (Map.Entry<String, String> e : configuration.entrySet()) {
new ImmutableBytesWritable(Bytes.toBytes(e.getKey())).write(out);
new ImmutableBytesWritable(Bytes.toBytes(e.getValue())).write(out);
}
}
// Comparable
/**
@@ -1030,7 +937,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* 1 if there is a mismatch in the contents
*/
@Override
public int compareTo(final HTableDescriptor other) {
public int compareTo(@Nonnull final HTableDescriptor other) {
int result = this.name.compareTo(other.name);
if (result == 0) {
result = families.size() - other.families.size();
@@ -1094,7 +1001,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
public void setRegionReplication(int regionReplication) {
setValue(REGION_REPLICATION_KEY,
new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
new Bytes(Bytes.toBytes(Integer.toString(regionReplication))));
}
/**
@@ -1207,7 +1114,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
// generate a coprocessor key
int maxCoprocessorNumber = 0;
Matcher keyMatcher;
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e :
this.values.entrySet()) {
keyMatcher =
HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
@@ -1237,7 +1144,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public boolean hasCoprocessor(String className) {
Matcher keyMatcher;
Matcher valueMatcher;
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e :
this.values.entrySet()) {
keyMatcher =
HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(
@@ -1269,7 +1176,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
List<String> result = new ArrayList<String>();
Matcher keyMatcher;
Matcher valueMatcher;
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values.entrySet()) {
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));
if (!keyMatcher.matches()) {
continue;
@@ -1289,10 +1196,10 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @param className Class name of the co-processor
*/
public void removeCoprocessor(String className) {
ImmutableBytesWritable match = null;
Bytes match = null;
Matcher keyMatcher;
Matcher valueMatcher;
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : this.values
for (Map.Entry<Bytes, Bytes> e : this.values
.entrySet()) {
keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
.getKey().get()));
@@ -1427,7 +1334,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public static HTableDescriptor parseFrom(final byte [] bytes)
throws DeserializationException, IOException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
throw new DeserializationException("Expected PB encoded HTableDescriptor");
}
int pblen = ProtobufUtil.lengthOfPBMagic();
TableSchema.Builder builder = TableSchema.newBuilder();
@@ -1446,7 +1353,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
public TableSchema convert() {
TableSchema.Builder builder = TableSchema.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(getTableName()));
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder();
aBuilder.setFirst(ByteStringer.wrap(e.getKey().get()));
aBuilder.setSecond(ByteStringer.wrap(e.getValue().get()));

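Every caller that walks table attributes (the coprocessor scans above, and the REST model further down) now iterates Map.Entry<Bytes, Bytes>. A representative loop, assuming an HTableDescriptor htd is already in scope:

for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {
  String key = Bytes.toString(e.getKey().get());
  String value = Bytes.toStringBinary(e.getValue().get());
  System.out.println(key + " => " + value);
}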
CompoundConfiguration.java

@@ -31,7 +31,7 @@ import java.util.Map;
import org.apache.commons.collections.iterators.UnmodifiableIterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -149,27 +149,27 @@ public class CompoundConfiguration extends Configuration {
}
/**
* Add ImmutableBytesWritable map to config list. This map is generally
* Add Bytes map to config list. This map is generally
* created by HTableDescriptor or HColumnDescriptor, but can be abstractly
* used. The added configuration overrides the previous ones if there are
* name collisions.
*
* @param map
* ImmutableBytesWritable map
* Bytes map
* @return this, for builder pattern
*/
public CompoundConfiguration addWritableMap(
final Map<ImmutableBytesWritable, ImmutableBytesWritable> map) {
public CompoundConfiguration addBytesMap(
final Map<Bytes, Bytes> map) {
freezeMutableConf();
// put new map at the front of the list (top priority)
this.configs.add(0, new ImmutableConfigMap() {
Map<ImmutableBytesWritable, ImmutableBytesWritable> m = map;
Map<Bytes, Bytes> m = map;
@Override
public Iterator<Map.Entry<String,String>> iterator() {
Map<String, String> ret = new HashMap<String, String>();
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : map.entrySet()) {
for (Map.Entry<Bytes, Bytes> entry : map.entrySet()) {
String key = Bytes.toString(entry.getKey().get());
String val = entry.getValue() == null ? null : Bytes.toString(entry.getValue().get());
ret.put(key, val);
@@ -179,11 +179,11 @@ public class CompoundConfiguration extends Configuration {
@Override
public String get(String key) {
ImmutableBytesWritable ibw = new ImmutableBytesWritable(Bytes
Bytes ibw = new Bytes(Bytes
.toBytes(key));
if (!m.containsKey(ibw))
return null;
ImmutableBytesWritable value = m.get(ibw);
Bytes value = m.get(ibw);
if (value == null || value.get() == null)
return null;
return Bytes.toString(value.get());

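The rename addWritableMap -> addBytesMap keeps the builder semantics: maps added later take priority on name collisions. A sketch, where baseConf stands in for any ordinary Hadoop Configuration:

Map<Bytes, Bytes> map = new HashMap<Bytes, Bytes>();
map.put(new Bytes(Bytes.toBytes("D")), new Bytes(Bytes.toBytes("4")));
Configuration conf = new CompoundConfiguration()
    .add(baseConf)      // lower priority
    .addBytesMap(map);  // added last, so it wins for key "D"
String v = conf.get("D"); // "4"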
Bytes.java

@@ -39,13 +39,13 @@ import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
@@ -58,11 +58,14 @@ import com.google.common.collect.Lists;
/**
* Utility class that handles byte arrays, conversions to/from other types,
* comparisons, hash code generation, manufacturing keys for HashMaps or
* HashSets, etc.
* HashSets, and can be used as a key in maps or trees.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Bytes {
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
justification="It has been like this forever")
public class Bytes implements Comparable<Bytes> {
//HConstants.UTF8_ENCODING should be updated if this changed
/** When we encode strings, we always specify UTF8 encoding */
private static final String UTF8_ENCODING = "UTF-8";
@@ -136,6 +139,190 @@ public class Bytes {
return b == null ? 0 : b.length;
}
private byte[] bytes;
private int offset;
private int length;
/**
* Create a zero-size sequence.
*/
public Bytes() {
super();
}
/**
* Create a Bytes using the byte array as the initial value.
* @param bytes This array becomes the backing storage for the object.
*/
public Bytes(byte[] bytes) {
this(bytes, 0, bytes.length);
}
/**
* Set the new Bytes to the contents of the passed
* <code>ibw</code>.
* @param ibw the value to set this Bytes to.
*/
public Bytes(final Bytes ibw) {
this(ibw.get(), ibw.getOffset(), ibw.getLength());
}
/**
* Set the value to a given byte range
* @param bytes the new byte range to set to
* @param offset the offset in newData to start at
* @param length the number of bytes in the range
*/
public Bytes(final byte[] bytes, final int offset,
final int length) {
this.bytes = bytes;
this.offset = offset;
this.length = length;
}
/**
* Copy bytes from ByteString instance.
* @param byteString copy from
*/
public Bytes(final ByteString byteString) {
this(byteString.toByteArray());
}
/**
* Get the data from the Bytes.
* @return The data is only valid between offset and offset+length.
*/
public byte [] get() {
if (this.bytes == null) {
throw new IllegalStateException("Uninitialiized. Null constructor " +
"called w/o accompaying readFields invocation");
}
return this.bytes;
}
/**
* @param b Use passed bytes as backing array for this instance.
*/
public void set(final byte [] b) {
set(b, 0, b.length);
}
/**
* @param b Use passed bytes as backing array for this instance.
* @param offset
* @param length
*/
public void set(final byte [] b, final int offset, final int length) {
this.bytes = b;
this.offset = offset;
this.length = length;
}
/**
* @return the number of valid bytes in the buffer
* @deprecated use {@link #getLength()} instead
*/
@Deprecated
public int getSize() {
if (this.bytes == null) {
throw new IllegalStateException("Uninitialiized. Null constructor " +
"called w/o accompaying readFields invocation");
}
return this.length;
}
/**
* @return the number of valid bytes in the buffer
*/
public int getLength() {
if (this.bytes == null) {
throw new IllegalStateException("Uninitialiized. Null constructor " +
"called w/o accompaying readFields invocation");
}
return this.length;
}
/**
* @return offset
*/
public int getOffset(){
return this.offset;
}
public ByteString toByteString() {
return ByteString.copyFrom(this.bytes, this.offset, this.length);
}
@Override
public int hashCode() {
return Bytes.hashCode(bytes, offset, length);
}
/**
* Define the sort order of the Bytes.
* @param that The other bytes writable
* @return Positive if left is bigger than right, 0 if they are equal, and
* negative if left is smaller than right.
*/
public int compareTo(Bytes that) {
return BYTES_RAWCOMPARATOR.compare(
this.bytes, this.offset, this.length,
that.bytes, that.offset, that.length);
}
/**
* Compares the bytes in this object to the specified byte array
* @param that
* @return Positive if left is bigger than right, 0 if they are equal, and
* negative if left is smaller than right.
*/
public int compareTo(final byte [] that) {
return BYTES_RAWCOMPARATOR.compare(
this.bytes, this.offset, this.length,
that, 0, that.length);
}
/**
* @see Object#equals(Object)
*/
@Override
public boolean equals(Object right_obj) {
if (right_obj instanceof byte []) {
return compareTo((byte [])right_obj) == 0;
}
if (right_obj instanceof Bytes) {
return compareTo((Bytes)right_obj) == 0;
}
return false;
}
/**
* @see Object#toString()
*/
@Override
public String toString() {
return Bytes.toString(bytes, offset, length);
}
/**
* @param array List of byte [].
* @return Array of byte [].
*/
public static byte [][] toArray(final List<byte []> array) {
// List#toArray doesn't work on lists of byte [].
byte[][] results = new byte[array.size()][];
for (int i = 0; i < array.size(); i++) {
results[i] = array.get(i);
}
return results;
}
/**
* Returns a copy of the bytes referred to by this writable
*/
public byte[] copyBytes() {
return Arrays.copyOfRange(bytes, offset, offset+length);
}
/**
* Byte array comparator class.
*/
@@ -1356,8 +1543,8 @@ public class Bytes {
/**
* @param b bytes to hash
* @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the
* passed in array. This method is what {@link org.apache.hadoop.io.Text} and
* {@link ImmutableBytesWritable} use calculating hash code.
* passed in array. This method is what {@link org.apache.hadoop.io.Text}
* uses when calculating hash code.
*/
public static int hashCode(final byte [] b) {
return hashCode(b, b.length);
@@ -1367,8 +1554,8 @@ public class Bytes {
* @param b value
* @param length length of the value
* @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the
* passed in array. This method is what {@link org.apache.hadoop.io.Text} and
* {@link ImmutableBytesWritable} use calculating hash code.
* passed in array. This method is what {@link org.apache.hadoop.io.Text}
* uses when calculating hash code.
*/
public static int hashCode(final byte [] b, final int length) {
return WritableComparator.hashBytes(b, length);

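Since the instance side of Bytes implements content-based equals/hashCode plus compareTo over both Bytes and byte[], it slots in wherever ImmutableBytesWritable served as a map key. A short sketch:

Bytes a = new Bytes(Bytes.toBytes("row-1"));
Bytes b = new Bytes(Bytes.toBytes("row-1"));
Map<Bytes, String> index = new HashMap<Bytes, String>();
index.put(a, "first");
String hit = index.get(b);                      // "first": equality is by content
int cmp = a.compareTo(Bytes.toBytes("row-2"));  // negative, via the byte[] overload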
TestCompoundConfiguration.java

@@ -25,7 +25,6 @@ import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -116,23 +115,23 @@ public class TestCompoundConfiguration extends TestCase {
assertEquals(baseConfSize + 1, cnt);
}
private ImmutableBytesWritable strToIbw(String s) {
return new ImmutableBytesWritable(Bytes.toBytes(s));
private Bytes strToIb(String s) {
return new Bytes(Bytes.toBytes(s));
}
@Test
public void testWithIbwMap() {
Map<ImmutableBytesWritable, ImmutableBytesWritable> map =
new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
map.put(strToIbw("B"), strToIbw("2b"));
map.put(strToIbw("C"), strToIbw("33"));
map.put(strToIbw("D"), strToIbw("4"));
Map<Bytes, Bytes> map =
new HashMap<Bytes, Bytes>();
map.put(strToIb("B"), strToIb("2b"));
map.put(strToIb("C"), strToIb("33"));
map.put(strToIb("D"), strToIb("4"));
// unlike config, note that Bytes maps can accept null values
map.put(strToIbw("G"), null);
map.put(strToIb("G"), null);
CompoundConfiguration compoundConf = new CompoundConfiguration()
.add(baseConf)
.addWritableMap(map);
.addBytesMap(map);
assertEquals("1", compoundConf.get("A"));
assertEquals("2b", compoundConf.get("B"));
assertEquals(33, compoundConf.getInt("C", 0));
@@ -157,7 +156,7 @@ public class TestCompoundConfiguration extends TestCase {
conf2.set("D", "not4");
assertEquals("modification", conf2.get("X"));
assertEquals("not4", conf2.get("D"));
conf2.addWritableMap(map);
conf2.addBytesMap(map);
assertEquals("4", conf2.get("D")); // map overrides
}

Constraints.java

@@ -34,7 +34,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -121,9 +120,9 @@ public final class Constraints {
disable(desc);
// remove all the constraint settings
List<ImmutableBytesWritable> keys = new ArrayList<ImmutableBytesWritable>();
List<Bytes> keys = new ArrayList<Bytes>();
// loop through all the key, values looking for constraints
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : desc
for (Map.Entry<Bytes, Bytes> e : desc
.getValues().entrySet()) {
String key = Bytes.toString((e.getKey().get()));
String[] className = CONSTRAINT_HTD_ATTR_KEY_PATTERN.split(key);
@@ -132,7 +131,7 @@ public final class Constraints {
}
}
// now remove all the keys we found
for (ImmutableBytesWritable key : keys) {
for (Bytes key : keys) {
desc.remove(key);
}
}
@@ -562,7 +561,7 @@ public final class Constraints {
ClassLoader classloader) throws IOException {
List<Constraint> constraints = new ArrayList<Constraint>();
// loop through all the key, values looking for constraints
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e : desc
for (Map.Entry<Bytes, Bytes> e : desc
.getValues().entrySet()) {
// read out the constraint
String key = Bytes.toString(e.getKey().get()).trim();

HRegion.java

@@ -586,7 +586,7 @@ public class HRegion implements HeapSize { // , Writable{
this.conf = new CompoundConfiguration()
.add(confParam)
.addStringMap(htd.getConfiguration())
.addWritableMap(htd.getValues());
.addBytesMap(htd.getValues());
this.flushCheckInterval = conf.getInt(MEMSTORE_PERIODIC_FLUSH_INTERVAL,
DEFAULT_CACHE_FLUSH_INTERVAL);
this.flushPerChanges = conf.getLong(MEMSTORE_FLUSH_PER_CHANGES, DEFAULT_FLUSH_PER_CHANGES);

HStore.java

@@ -221,7 +221,7 @@ public class HStore implements Store {
.add(confParam)
.addStringMap(region.getTableDesc().getConfiguration())
.addStringMap(family.getConfiguration())
.addWritableMap(family.getValues());
.addBytesMap(family.getValues());
this.blocksize = family.getBlocksize();
this.dataBlockEncoder =

RegionCoprocessorHost.java

@@ -32,6 +32,10 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.regex.Matcher;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
import org.apache.commons.collections.map.AbstractReferenceMap;
import org.apache.commons.collections.map.ReferenceMap;
import org.apache.commons.logging.Log;
@@ -68,7 +72,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
@@ -78,11 +81,6 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.protobuf.Message;
import com.google.protobuf.Service;
/**
* Implements the coprocessor environment and runtime support for coprocessors
* loaded within a {@link HRegion}.
@@ -192,7 +190,7 @@ public class RegionCoprocessorHost
// scan the table attributes for coprocessor load specifications
// initialize the coprocessors
List<RegionEnvironment> configured = new ArrayList<RegionEnvironment>();
for (Map.Entry<ImmutableBytesWritable,ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e :
region.getTableDesc().getValues().entrySet()) {
String key = Bytes.toString(e.getKey().get()).trim();
String spec = Bytes.toString(e.getValue().get()).trim();

TableSchemaModel.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
import org.apache.hadoop.hbase.rest.protobuf.generated.TableSchemaMessage.TableSchema;
@@ -88,7 +87,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
*/
public TableSchemaModel(HTableDescriptor htd) {
setName(htd.getTableName().getNameAsString());
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e:
htd.getValues().entrySet()) {
addAttribute(Bytes.toString(e.getKey().get()),
Bytes.toString(e.getValue().get()));
@@ -96,9 +95,9 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
for (HColumnDescriptor hcd: htd.getFamilies()) {
ColumnSchemaModel columnModel = new ColumnSchemaModel();
columnModel.setName(hcd.getNameAsString());
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e:
hcd.getValues().entrySet()) {
columnModel.addAttribute(Bytes.toString(e.getKey().get()),
columnModel.addAttribute(Bytes.toString(e.getKey().get()),
Bytes.toString(e.getValue().get()));
}
addColumnFamily(columnModel);

RestoreSnapshotHelper.java

@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -59,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.IOUtils;
@@ -690,7 +690,7 @@ public class RestoreSnapshotHelper {
for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
htd.addFamily(hcd);
}
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
for (Map.Entry<Bytes, Bytes> e:
snapshotTableDescriptor.getValues().entrySet()) {
htd.setValue(e.getKey(), e.getValue());
}

TestSerialization.java

@@ -141,9 +141,8 @@ public class TestSerialization {
@Test public void testTableDescriptor() throws Exception {
final String name = "testTableDescriptor";
HTableDescriptor htd = createTableDescriptor(name);
byte [] mb = Writables.getBytes(htd);
HTableDescriptor deserializedHtd =
(HTableDescriptor)Writables.getWritable(mb, new HTableDescriptor());
byte [] mb = htd.toByteArray();
HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(mb);
assertEquals(htd.getTableName(), deserializedHtd.getTableName());
}

ThriftServerRunner.java

@@ -153,6 +153,7 @@ public class ThriftServerRunner implements Runnable {
private static final String DEFAULT_BIND_ADDR = "0.0.0.0";
public static final int DEFAULT_LISTEN_PORT = 9090;
public static final int HREGION_VERSION = 1;
private final int listenPort;
private Configuration conf;
@@ -749,7 +750,7 @@ public class ThriftServerRunner implements Runnable {
region.endKey = ByteBuffer.wrap(info.getEndKey());
region.id = info.getRegionId();
region.name = ByteBuffer.wrap(info.getRegionName());
region.version = info.getVersion();
region.version = HREGION_VERSION; // HRegionInfo is no longer versioned; PB encoding is used
results.add(region);
}
return results;
@@ -1554,7 +1555,7 @@ public class ThriftServerRunner implements Runnable {
region.setEndKey(regionInfo.getEndKey());
region.id = regionInfo.getRegionId();
region.setName(regionInfo.getRegionName());
region.version = regionInfo.getVersion();
region.version = HREGION_VERSION; // the version field is no longer used; PB encoding is used.
// find region assignment to server
ServerName serverName = HRegionInfo.getServerName(startRowResult);