diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 46e97c3ead1..fb0b0ee8fe7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -18,29 +18,21 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Locale;
 import java.util.Map;
-import java.util.Set;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.PrettyPrinter;
 import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
 
-import com.google.common.base.Preconditions;
-
 /**
  * An HColumnDescriptor contains information about a column family such as the
  * number of versions, compression settings, etc.
@@ -48,261 +40,64 @@ import com.google.common.base.Preconditions;
  * It is used as input when creating a table or adding a column.
  */
 @InterfaceAudience.Public
-public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
-  // For future backward compatibility
-
-  // Version 3 was when column names become byte arrays and when we picked up
-  // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82.
-  // Version 5 was when bloom filter descriptors were removed.
-  // Version 6 adds metadata as a map where keys and values are byte[].
-  // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
-  // Version 8 -- reintroduction of bloom filters, changed from boolean to enum
-  // Version 9 -- add data block encoding
-  // Version 10 -- change metadata to standard type.
-  // Version 11 -- add column family level configuration.
-  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
-
-  public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
-
-  // These constants are used as FileInfo keys
-  public static final String COMPRESSION = "COMPRESSION";
-  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
-  public static final String ENCODE_ON_DISK = // To be removed, it is not used anymore
-      "ENCODE_ON_DISK";
-  public static final String DATA_BLOCK_ENCODING =
-      "DATA_BLOCK_ENCODING";
-  /**
-   * Key for the BLOCKCACHE attribute.
-   * A more exact name would be CACHE_DATA_ON_READ because this flag sets whether or not we
-   * cache DATA blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot be
-   * disabled.
-   */
-  public static final String BLOCKCACHE = "BLOCKCACHE";
-  public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE";
-  public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE";
-  public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE";
-  public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE";
-  /**
-   * Key for cache data into L1 if cache is set up with more than one tier.
-   * To set in the shell, do something like this:
-   * hbase(main):003:0> create 't',
-   * {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}
-   */
-  public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
-
-  /**
-   * Key for the PREFETCH_BLOCKS_ON_OPEN attribute.
-   * If set, all INDEX, BLOOM, and DATA blocks of HFiles belonging to this
-   * family will be loaded into the cache as soon as the file is opened. These
-   * loads will not count as cache misses.
-   */
-  public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN";
-
-  /**
-   * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}.
-   * Use smaller block sizes for faster random-access at expense of larger
-   * indices (more memory consumption). Note that this is a soft limit and that
-   * blocks have overhead (metadata, CRCs) so blocks will tend to be the size
-   * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k
-   * means hbase data will align with an SSDs 4k page accesses (TODO).
-   */
-  public static final String BLOCKSIZE = "BLOCKSIZE";
-
+@Deprecated // remove it in 3.0
+public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
+  public static final String IN_MEMORY_COMPACTION = ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION;
+  public static final String COMPRESSION = ColumnFamilyDescriptorBuilder.COMPRESSION;
+  public static final String COMPRESSION_COMPACT = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT;
+  public static final String ENCODE_ON_DISK = "ENCODE_ON_DISK";
+  public static final String DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING;
+  public static final String BLOCKCACHE = ColumnFamilyDescriptorBuilder.BLOCKCACHE;
+  public static final String CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE;
+  public static final String CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
+  public static final String CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
+  public static final String EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
+  public static final String CACHE_DATA_IN_L1 = ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1;
+  public static final String PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
+  public static final String BLOCKSIZE = ColumnFamilyDescriptorBuilder.BLOCKSIZE;
   public static final String LENGTH = "LENGTH";
-  public static final String TTL = "TTL";
-  public static final String BLOOMFILTER = "BLOOMFILTER";
+  public static final String TTL = ColumnFamilyDescriptorBuilder.TTL;
+  public static final String BLOOMFILTER = ColumnFamilyDescriptorBuilder.BLOOMFILTER;
   public static final String FOREVER = "FOREVER";
-  public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE";
+  public static final String REPLICATION_SCOPE = ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE;
   public static final byte[] REPLICATION_SCOPE_BYTES = Bytes.toBytes(REPLICATION_SCOPE);
-  public static final String MIN_VERSIONS = "MIN_VERSIONS";
-  /**
-   * Retain all cells across flushes and compactions even if they fall behind
-   * a delete tombstone. To see all retained cells, do a 'raw' scan; see
-   * Scan#setRaw or pass RAW => true attribute in the shell.
-   */
-  public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
-  public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
-
-  public static final String ENCRYPTION = "ENCRYPTION";
-  public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
-
-  public static final String IS_MOB = "IS_MOB";
+  public static final String MIN_VERSIONS = ColumnFamilyDescriptorBuilder.MIN_VERSIONS;
+  public static final String KEEP_DELETED_CELLS = ColumnFamilyDescriptorBuilder.KEEP_DELETED_CELLS;
+  public static final String COMPRESS_TAGS = ColumnFamilyDescriptorBuilder.COMPRESS_TAGS;
+  public static final String ENCRYPTION = ColumnFamilyDescriptorBuilder.ENCRYPTION;
+  public static final String ENCRYPTION_KEY = ColumnFamilyDescriptorBuilder.ENCRYPTION_KEY;
+  public static final String IS_MOB = ColumnFamilyDescriptorBuilder.IS_MOB;
   public static final byte[] IS_MOB_BYTES = Bytes.toBytes(IS_MOB);
-  public static final String MOB_THRESHOLD = "MOB_THRESHOLD";
+  public static final String MOB_THRESHOLD = ColumnFamilyDescriptorBuilder.MOB_THRESHOLD;
   public static final byte[] MOB_THRESHOLD_BYTES = Bytes.toBytes(MOB_THRESHOLD);
-  public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k
-  public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY";
-  public static final byte[] MOB_COMPACT_PARTITION_POLICY_BYTES =
-      Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY);
-  public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY =
-      MobCompactPartitionPolicy.DAILY;
-
-  public static final String DFS_REPLICATION = "DFS_REPLICATION";
-  public static final short DEFAULT_DFS_REPLICATION = 0;
-
-  public static final String STORAGE_POLICY = "STORAGE_POLICY";
-
-  /**
-   * Default compression type.
-   */
-  public static final String DEFAULT_COMPRESSION =
-      Compression.Algorithm.NONE.getName();
-
-  /**
-   * Default value of the flag that enables data block encoding on disk, as
-   * opposed to encoding in cache only. We encode blocks everywhere by default,
-   * as long as {@link #DATA_BLOCK_ENCODING} is not NONE.
-   */
+  public static final long DEFAULT_MOB_THRESHOLD = ColumnFamilyDescriptorBuilder.DEFAULT_MOB_THRESHOLD;
+  public static final String MOB_COMPACT_PARTITION_POLICY = ColumnFamilyDescriptorBuilder.MOB_COMPACT_PARTITION_POLICY;
+  public static final byte[] MOB_COMPACT_PARTITION_POLICY_BYTES = Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY);
+  public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY
+    = ColumnFamilyDescriptorBuilder.DEFAULT_MOB_COMPACT_PARTITION_POLICY;
+  public static final String DFS_REPLICATION = ColumnFamilyDescriptorBuilder.DFS_REPLICATION;
+  public static final short DEFAULT_DFS_REPLICATION = ColumnFamilyDescriptorBuilder.DEFAULT_DFS_REPLICATION;
+  public static final String STORAGE_POLICY = ColumnFamilyDescriptorBuilder.STORAGE_POLICY;
+  public static final String DEFAULT_COMPRESSION = ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESSION.name();
   public static final boolean DEFAULT_ENCODE_ON_DISK = true;
-
-  /** Default data block encoding algorithm. */
-  public static final String DEFAULT_DATA_BLOCK_ENCODING =
-      DataBlockEncoding.NONE.toString();
-
-  /**
-   * Default number of versions of a record to keep.
-   */
-  public static final int DEFAULT_VERSIONS = HBaseConfiguration.create().getInt(
-      "hbase.column.max.version", 1);
-
-  /**
-   * Default is not to keep a minimum of versions.
-   */
-  public static final int DEFAULT_MIN_VERSIONS = 0;
-
-  /*
-   * Cache here the HCD value.
-   * Question: its OK to cache since when we're reenable, we create a new HCD?
-   */
-  private volatile Integer blocksize = null;
-
-  /**
-   * Default setting for whether to try and serve this column family from memory or not.
-   */
-  public static final boolean DEFAULT_IN_MEMORY = false;
-
-  /**
-   * Default setting for preventing deleted from being collected immediately.
-   */
-  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;
-
-  /**
-   * Default setting for whether to use a block cache or not.
-   */
-  public static final boolean DEFAULT_BLOCKCACHE = true;
-
-  /**
-   * Default setting for whether to cache data blocks on write if block caching
-   * is enabled.
-   */
-  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
-
-  /**
-   * Default setting for whether to cache data blocks in L1 tier. Only makes sense if more than
-   * one tier in operations: i.e. if we have an L1 and a L2. This will be the cases if we are
-   * using BucketCache.
-   */
-  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false;
-
-  /**
-   * Default setting for whether to cache index blocks on write if block
-   * caching is enabled.
-   */
-  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false;
-
-  /**
-   * Default size of blocks in files stored to the filesytem (hfiles).
-   */
-  public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE;
-
-  /**
-   * Default setting for whether or not to use bloomfilters.
-   */
-  public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
-
-  /**
-   * Default setting for whether to cache bloom filter blocks on write if block
-   * caching is enabled.
-   */
-  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false;
-
-  /**
-   * Default time to live of cell contents.
-   */
-  public static final int DEFAULT_TTL = HConstants.FOREVER;
-
-  /**
-   * Default scope.
-   */
-  public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
-
-  /**
-   * Default setting for whether to evict cached blocks from the blockcache on
-   * close.
-   */
-  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false;
-
-  /**
-   * Default compress tags along with any type of DataBlockEncoding.
-   */
-  public static final boolean DEFAULT_COMPRESS_TAGS = true;
-
-  /*
-   * Default setting for whether to prefetch blocks into the blockcache on open.
-   */
-  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false;
-
-  private final static Map<String, String> DEFAULT_VALUES = new HashMap<>();
-  private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>();
-
-  static {
-    DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
-    DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
-    DEFAULT_VALUES.put(HConstants.VERSIONS, String.valueOf(DEFAULT_VERSIONS));
-    DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS));
-    DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION);
-    DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
-    DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
-    DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
-    DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
-    DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
-    DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
-    DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE));
-    DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1));
-    DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE));
-    DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE));
-    DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE));
-    DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN));
-    for (String s : DEFAULT_VALUES.keySet()) {
-      RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)));
-    }
-    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
-    RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
-    RESERVED_KEYWORDS.add(new Bytes(IS_MOB_BYTES));
-    RESERVED_KEYWORDS.add(new Bytes(MOB_THRESHOLD_BYTES));
-    RESERVED_KEYWORDS.add(new Bytes(MOB_COMPACT_PARTITION_POLICY_BYTES));
-  }
-
-  private static final int UNINITIALIZED = -1;
-
-  // Column family name
-  private byte [] name;
-
-  // Column metadata
-  private final Map<Bytes, Bytes> values = new HashMap<>();
-
-  /**
-   * A map which holds the configuration specific to the column family.
-   * The keys of the map have the same names as config keys and override the defaults with
-   * cf-specific settings. Example usage may be for compactions, etc.
-   */
-  private final Map<String, String> configuration = new HashMap<>();
-
-  /*
-   * Cache the max versions rather than calculate it every time.
-   */
-  private int cachedMaxVersions = UNINITIALIZED;
-
+  public static final String DEFAULT_DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DEFAULT_DATA_BLOCK_ENCODING.name();
+  public static final int DEFAULT_VERSIONS = ColumnFamilyDescriptorBuilder.DEFAULT_MAX_VERSIONS;
+  public static final int DEFAULT_MIN_VERSIONS = ColumnFamilyDescriptorBuilder.DEFAULT_MIN_VERSIONS;
+  public static final boolean DEFAULT_IN_MEMORY = ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY;
+  public static final KeepDeletedCells DEFAULT_KEEP_DELETED = ColumnFamilyDescriptorBuilder.DEFAULT_KEEP_DELETED;
+  public static final boolean DEFAULT_BLOCKCACHE = ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKCACHE;
+  public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_ON_WRITE;
+  public static final boolean DEFAULT_CACHE_DATA_IN_L1 = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1;
+  public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_INDEX_ON_WRITE;
+  public static final int DEFAULT_BLOCKSIZE = ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE;
+  public static final String DEFAULT_BLOOMFILTER = ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.name();
+  public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_BLOOMS_ON_WRITE;
+  public static final int DEFAULT_TTL = ColumnFamilyDescriptorBuilder.DEFAULT_TTL;
+  public static final int DEFAULT_REPLICATION_SCOPE = ColumnFamilyDescriptorBuilder.DEFAULT_REPLICATION_SCOPE;
+  public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.DEFAULT_EVICT_BLOCKS_ON_CLOSE;
+  public static final boolean DEFAULT_COMPRESS_TAGS = ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESS_TAGS;
+  public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
+  protected final ModifyableColumnFamilyDescriptor delegatee;
+
   /**
    * Construct a column descriptor specifying only the family name
    * The other attributes are defaulted.
@@ -322,20 +117,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * letter -- and may not contain a :
    */
   public HColumnDescriptor(final byte [] familyName) {
-    isLegalFamilyName(familyName);
-    this.name = familyName;
-
-    setMaxVersions(DEFAULT_VERSIONS);
-    setMinVersions(DEFAULT_MIN_VERSIONS);
-    setKeepDeletedCells(DEFAULT_KEEP_DELETED);
-    setInMemory(DEFAULT_IN_MEMORY);
-    setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
-    setTimeToLive(DEFAULT_TTL);
-    setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT)));
-    setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase(Locale.ROOT)));
-    setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase(Locale.ROOT)));
-    setBlocksize(DEFAULT_BLOCKSIZE);
-    setScope(DEFAULT_REPLICATION_SCOPE);
+    this(new ModifyableColumnFamilyDescriptor(familyName));
   }
 
   /**
@@ -345,16 +127,16 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @param desc The descriptor.
    */
   public HColumnDescriptor(HColumnDescriptor desc) {
-    super();
-    this.name = desc.name.clone();
-    for (Map.Entry<Bytes, Bytes> e :
-        desc.values.entrySet()) {
-      this.values.put(e.getKey(), e.getValue());
-    }
-    for (Map.Entry<String, String> e : desc.configuration.entrySet()) {
-      this.configuration.put(e.getKey(), e.getValue());
-    }
-    setMaxVersions(desc.getMaxVersions());
+    this(desc, true);
+  }
+
+  protected HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) {
+    this(deepClone ? new ModifyableColumnFamilyDescriptor(desc)
+        : desc.delegatee);
+  }
+
+  protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
+    this.delegatee = delegate;
   }
 
   /**
@@ -364,54 +146,36 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
    * b can be null when deserializing). Cannot start with a '.'
    * either. Also Family can not be an empty value or equal "recovered.edits".
+   * @deprecated Use {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])}.
    */
+  @Deprecated
   public static byte [] isLegalFamilyName(final byte [] b) {
-    if (b == null) {
-      return b;
-    }
-    Preconditions.checkArgument(b.length != 0, "Family name can not be empty");
-    if (b[0] == '.') {
-      throw new IllegalArgumentException("Family names cannot start with a " +
-        "period: " + Bytes.toString(b));
-    }
-    for (int i = 0; i < b.length; i++) {
-      if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') {
-        throw new IllegalArgumentException("Illegal character <" + b[i] +
-          ">. Family names cannot contain control characters or colons: " +
-          Bytes.toString(b));
-      }
-    }
-    byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR);
-    if (Bytes.equals(recoveredEdit, b)) {
-      throw new IllegalArgumentException("Family name cannot be: " +
-        HConstants.RECOVERED_EDITS_DIR);
-    }
-    return b;
+    return ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
  }
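
Usage sketch, not part of the patch: the legacy name check now simply forwards to the builder, so callers can move straight to the new entry point. The family name below is invented for illustration.

    byte[] family = Bytes.toBytes("cf");
    // Old (now deprecated): HColumnDescriptor.isLegalFamilyName(family);
    // New, per the delegation above; throws IllegalArgumentException on bad names:
    ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(family);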
 
   /**
    * @return Name of this column family
    */
+  @Override
   public byte [] getName() {
-    return name;
+    return delegatee.getName();
   }
 
   /**
-   * @return Name of this column family
+   * @return The name string of this column family
    */
+  @Override
   public String getNameAsString() {
-    return Bytes.toString(this.name);
+    return delegatee.getNameAsString();
   }
 
   /**
    * @param key The key.
    * @return The value.
    */
+  @Override
   public byte[] getValue(byte[] key) {
-    Bytes ibw = values.get(new Bytes(key));
-    if (ibw == null)
-      return null;
-    return ibw.get();
+    return delegatee.getValue(key);
   }
 
   /**
@@ -420,17 +184,12 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   public String getValue(String key) {
     byte[] value = getValue(Bytes.toBytes(key));
-    if (value == null)
-      return null;
-    return Bytes.toString(value);
+    return value == null ? null : Bytes.toString(value);
   }
 
-  /**
-   * @return All values.
-   */
+  @Override
   public Map<Bytes, Bytes> getValues() {
-    // shallow pointer copy
-    return Collections.unmodifiableMap(values);
+    return delegatee.getValues();
   }
 
   /**
@@ -439,18 +198,16 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setValue(byte[] key, byte[] value) {
-    if (Bytes.compareTo(Bytes.toBytes(HConstants.VERSIONS), key) == 0) {
-      cachedMaxVersions = UNINITIALIZED;
-    }
-    values.put(new Bytes(key), new Bytes(value));
+    getDelegateeForModification().setValue(key, value);
     return this;
   }
 
   /**
    * @param key Key whose entry we're to remove from HCD parameters.
   */
-  public void remove(final byte [] key) {
-    values.remove(new Bytes(key));
+  public HColumnDescriptor remove(final byte [] key) {
+    getDelegateeForModification().removeValue(new Bytes(key));
+    return this;
  }
 
   /**
@@ -459,11 +216,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setValue(String key, String value) {
-    if (value == null) {
-      remove(Bytes.toBytes(key));
-    } else {
-      setValue(Bytes.toBytes(key), Bytes.toBytes(value));
-    }
+    getDelegateeForModification().setValue(key, value);
     return this;
   }
 
@@ -489,32 +242,17 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return getCompactionCompressionType();
   }
 
-  /** @return maximum number of versions */
+  @Override
   public int getMaxVersions() {
-    if (this.cachedMaxVersions == UNINITIALIZED) {
-      String v = getValue(HConstants.VERSIONS);
-      this.cachedMaxVersions = Integer.parseInt(v);
-    }
-    return this.cachedMaxVersions;
+    return delegatee.getMaxVersions();
   }
 
   /**
-   * @param maxVersions maximum number of versions
+   * @param value maximum number of versions
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMaxVersions(int maxVersions) {
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
-      // Until there is support, consider 0 or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions must be positive");
-    }
-    if (maxVersions < this.getMinVersions()) {
-      throw new IllegalArgumentException("Set MaxVersion to " + maxVersions
-          + " while minVersion is " + this.getMinVersions()
-          + ". Maximum versions must be >= minimum versions ");
-    }
-    setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
-    cachedMaxVersions = maxVersions;
+  public HColumnDescriptor setMaxVersions(int value) {
+    getDelegateeForModification().setMaxVersions(value);
     return this;
   }
 
@@ -542,39 +280,24 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return this;
   }
 
-  /**
-   * @return The storefile/hfile blocksize for this column family.
-   */
-  public synchronized int getBlocksize() {
-    if (this.blocksize == null) {
-      String value = getValue(BLOCKSIZE);
-      this.blocksize = (value != null)?
-        Integer.decode(value): Integer.valueOf(DEFAULT_BLOCKSIZE);
-    }
-    return this.blocksize.intValue();
-
+  @Override
+  public int getBlocksize() {
+    return delegatee.getBlocksize();
   }
 
   /**
-   * @param s Blocksize to use when writing out storefiles/hfiles on this
+   * @param value Blocksize to use when writing out storefiles/hfiles on this
    * column family.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBlocksize(int s) {
-    setValue(BLOCKSIZE, Integer.toString(s));
-    this.blocksize = null;
+  public HColumnDescriptor setBlocksize(int value) {
+    getDelegateeForModification().setBlocksize(value);
     return this;
   }
 
-  /**
-   * @return Compression type setting.
-   */
+  @Override
   public Compression.Algorithm getCompressionType() {
-    String n = getValue(COMPRESSION);
-    if (n == null) {
-      return Compression.Algorithm.NONE;
-    }
-    return Compression.Algorithm.valueOf(n.toUpperCase(Locale.ROOT));
+    return delegatee.getCompressionType();
   }
 
   /**
@@ -582,73 +305,49 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * LZO is not bundled as part of the hbase distribution.
    * See LZO Compression
    * for how to enable it.
-   * @param type Compression type setting.
+   * @param value Compression type setting.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
-    return setValue(COMPRESSION, type.getName().toUpperCase(Locale.ROOT));
+  public HColumnDescriptor setCompressionType(Compression.Algorithm value) {
+    getDelegateeForModification().setCompressionType(value);
+    return this;
   }
 
-  /**
-   * @return the data block encoding algorithm used in block cache and
-   * optionally on disk
-   */
+  @Override
   public DataBlockEncoding getDataBlockEncoding() {
-    String type = getValue(DATA_BLOCK_ENCODING);
-    if (type == null) {
-      type = DEFAULT_DATA_BLOCK_ENCODING;
-    }
-    return DataBlockEncoding.valueOf(type);
+    return delegatee.getDataBlockEncoding();
   }
 
   /**
    * Set data block encoding algorithm used in block cache.
-   * @param type What kind of data block encoding will be used.
+   * @param value What kind of data block encoding will be used.
    * @return this (for chained invocation)
   */
-  public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
-    String name;
-    if (type != null) {
-      name = type.toString();
-    } else {
-      name = DataBlockEncoding.NONE.toString();
-    }
-    return setValue(DATA_BLOCK_ENCODING, name);
+  public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding value) {
+    getDelegateeForModification().setDataBlockEncoding(value);
+    return this;
   }
 
   /**
    * Set whether the tags should be compressed along with DataBlockEncoding. When no
   * DataBlockEncoding is in use, this has no effect.
    *
-   * @param compressTags
+   * @param value
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setCompressTags(boolean compressTags) {
-    return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
+  public HColumnDescriptor setCompressTags(boolean value) {
+    getDelegateeForModification().setCompressTags(value);
+    return this;
  }
 
-  /**
-   * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
-   * DataBlockEncoding is in use, this has no effect.
-   */
+  @Override
   public boolean isCompressTags() {
-    String compressTagsStr = getValue(COMPRESS_TAGS);
-    boolean compressTags = DEFAULT_COMPRESS_TAGS;
-    if (compressTagsStr != null) {
-      compressTags = Boolean.parseBoolean(compressTagsStr);
-    }
-    return compressTags;
+    return delegatee.isCompressTags();
   }
 
-  /**
-   * @return Compression type setting.
-   */
+  @Override
   public Compression.Algorithm getCompactionCompressionType() {
-    String n = getValue(COMPRESSION_COMPACT);
-    if (n == null) {
-      return getCompressionType();
-    }
-    return Compression.Algorithm.valueOf(n.toUpperCase(Locale.ROOT));
+    return delegatee.getCompactionCompressionType();
  }
 
   /**
@@ -656,180 +355,144 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * LZO is not bundled as part of the hbase distribution.
    * See LZO Compression
    * for how to enable it.
-   * @param type Compression type setting.
+   * @param value Compression type setting.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setCompactionCompressionType(
-      Compression.Algorithm type) {
-    return setValue(COMPRESSION_COMPACT, type.getName().toUpperCase(Locale.ROOT));
+  public HColumnDescriptor setCompactionCompressionType(Compression.Algorithm value) {
+    getDelegateeForModification().setCompactionCompressionType(value);
+    return this;
   }
 
-  /**
-   * @return True if we are to favor keeping all values for this column family in the
-   * HRegionServer cache.
-   */
+  @Override
   public boolean isInMemory() {
-    String value = getValue(HConstants.IN_MEMORY);
-    if (value != null) {
-      return Boolean.parseBoolean(value);
-    }
-    return DEFAULT_IN_MEMORY;
+    return delegatee.isInMemory();
   }
 
   /**
-   * @param inMemory True if we are to favor keeping all values for this column family in the
+   * @param value True if we are to favor keeping all values for this column family in the
    * HRegionServer cache
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setInMemory(boolean inMemory) {
-    return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
+  public HColumnDescriptor setInMemory(boolean value) {
+    getDelegateeForModification().setInMemory(value);
+    return this;
   }
 
-  /**
-   * @return in-memory compaction policy if set for the cf. Returns null if no policy is set
-   * for this column family
-   */
+  @Override
   public MemoryCompactionPolicy getInMemoryCompaction() {
-    String value = getValue(IN_MEMORY_COMPACTION);
-    if (value != null) {
-      return MemoryCompactionPolicy.valueOf(value);
-    }
-    return null;
  }
 
   /**
-   * @param inMemoryCompaction the preferred in-memory compaction policy
+   * @param value the preferred in-memory compaction policy
    * for this column family
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) {
-    return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString());
+  public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy value) {
+    getDelegateeForModification().setInMemoryCompaction(value);
+    return this;
   }
 
+  @Override
   public KeepDeletedCells getKeepDeletedCells() {
-    String value = getValue(KEEP_DELETED_CELLS);
-    if (value != null) {
-      // toUpperCase for backwards compatibility
-      return KeepDeletedCells.valueOf(value.toUpperCase(Locale.ROOT));
-    }
-    return DEFAULT_KEEP_DELETED;
+    return delegatee.getKeepDeletedCells();
   }
 
   /**
-   * @param keepDeletedCells True if deleted rows should not be collected
+   * @param value True if deleted rows should not be collected
    * immediately.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
-    return setValue(KEEP_DELETED_CELLS, keepDeletedCells.toString());
+  public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells value) {
+    getDelegateeForModification().setKeepDeletedCells(value);
+    return this;
  }
 
-  /**
-   * @return Time-to-live of cell contents, in seconds.
-   */
+  @Override
   public int getTimeToLive() {
-    String value = getValue(TTL);
-    return (value != null)? Integer.parseInt(value) : DEFAULT_TTL;
+    return delegatee.getTimeToLive();
   }
 
   /**
-   * @param timeToLive Time-to-live of cell contents, in seconds.
+   * @param value Time-to-live of cell contents, in seconds.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setTimeToLive(int timeToLive) {
-    return setValue(TTL, Integer.toString(timeToLive));
+  public HColumnDescriptor setTimeToLive(int value) {
+    getDelegateeForModification().setTimeToLive(value);
+    return this;
   }
 
   /**
-   * @param timeToLive Time to live of cell contents, in human readable format
+   * @param value Time to live of cell contents, in human readable format
    * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setTimeToLive(String timeToLive) throws HBaseException {
-    return setValue(TTL, PrettyPrinter.valueOf(timeToLive, Unit.TIME_INTERVAL));
+  public HColumnDescriptor setTimeToLive(String value) throws HBaseException {
+    getDelegateeForModification().setTimeToLive(value);
+    return this;
  }
 
-  /**
-   * @return The minimum number of versions to keep.
-   */
+  @Override
   public int getMinVersions() {
-    String value = getValue(MIN_VERSIONS);
-    return (value != null)? Integer.parseInt(value) : 0;
+    return delegatee.getMinVersions();
   }
 
   /**
-   * @param minVersions The minimum number of versions to keep.
+   * @param value The minimum number of versions to keep.
   * (used when timeToLive is set)
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMinVersions(int minVersions) {
-    return setValue(MIN_VERSIONS, Integer.toString(minVersions));
+  public HColumnDescriptor setMinVersions(int value) {
+    getDelegateeForModification().setMinVersions(value);
+    return this;
   }
 
-  /**
-   * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX
-   * and BLOOM type blocks).
-   */
+  @Override
   public boolean isBlockCacheEnabled() {
-    String value = getValue(BLOCKCACHE);
-    if (value != null) {
-      return Boolean.parseBoolean(value);
-    }
-    return DEFAULT_BLOCKCACHE;
+    return delegatee.isBlockCacheEnabled();
  }
 
   /**
-   * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache
+   * @param value True if hfile DATA type blocks should be cached (We always cache
    * INDEX and BLOOM blocks; you cannot turn this off).
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
-    return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
+  public HColumnDescriptor setBlockCacheEnabled(boolean value) {
+    getDelegateeForModification().setBlockCacheEnabled(value);
+    return this;
  }
 
-  /**
-   * @return bloom filter type used for new StoreFiles in ColumnFamily
-   */
+  @Override
   public BloomType getBloomFilterType() {
-    String n = getValue(BLOOMFILTER);
-    if (n == null) {
-      n = DEFAULT_BLOOMFILTER;
-    }
-    return BloomType.valueOf(n.toUpperCase(Locale.ROOT));
+    return delegatee.getBloomFilterType();
   }
 
   /**
-   * @param bt bloom filter type
+   * @param value bloom filter type
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setBloomFilterType(final BloomType bt) {
-    return setValue(BLOOMFILTER, bt.toString());
+  public HColumnDescriptor setBloomFilterType(final BloomType value) {
+    getDelegateeForModification().setBloomFilterType(value);
+    return this;
  }
 
-  /**
-   * @return the scope tag
-   */
+  @Override
   public int getScope() {
-    byte[] value = getValue(REPLICATION_SCOPE_BYTES);
-    if (value != null) {
-      return Integer.parseInt(Bytes.toString(value));
-    }
-    return DEFAULT_REPLICATION_SCOPE;
  }
 
  /**
-   * @param scope the scope tag
+   * @param value the scope tag
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setScope(int scope) {
-    return setValue(REPLICATION_SCOPE, Integer.toString(scope));
+  public HColumnDescriptor setScope(int value) {
+    getDelegateeForModification().setScope(value);
+    return this;
  }
 
-  /**
-   * @return true if we should cache data blocks on write
-   */
+  @Override
   public boolean isCacheDataOnWrite() {
-    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
+    return delegatee.isCacheDataOnWrite();
   }
 
   /**
@@ -837,15 +500,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheDataOnWrite(boolean value) {
-    return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
+    getDelegateeForModification().setCacheDataOnWrite(value);
+    return this;
  }
 
-  /**
-   * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
-   * than one tier; e.g. we are using CombinedBlockCache).
-   */
+  @Override
   public boolean isCacheDataInL1() {
-    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
+    return delegatee.isCacheDataInL1();
  }
 
   /**
@@ -854,22 +515,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheDataInL1(boolean value) {
-    return setValue(CACHE_DATA_IN_L1, Boolean.toString(value));
+    getDelegateeForModification().setCacheDataInL1(value);
+    return this;
  }
 
-  private boolean setAndGetBoolean(final String key, final boolean defaultSetting) {
-    String value = getValue(key);
-    if (value != null) {
-      return Boolean.parseBoolean(value);
-    }
-    return defaultSetting;
-  }
-
-  /**
-   * @return true if we should cache index blocks on write
-   */
+  @Override
   public boolean isCacheIndexesOnWrite() {
-    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
+    return delegatee.isCacheIndexesOnWrite();
   }
 
   /**
@@ -877,14 +529,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheIndexesOnWrite(boolean value) {
-    return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
+    getDelegateeForModification().setCacheIndexesOnWrite(value);
+    return this;
  }
 
-  /**
-   * @return true if we should cache bloomfilter blocks on write
-   */
+  @Override
  public boolean isCacheBloomsOnWrite() {
-    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
+    return delegatee.isCacheBloomsOnWrite();
  }
 
   /**
@@ -892,14 +543,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setCacheBloomsOnWrite(boolean value) {
-    return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
+    getDelegateeForModification().setCacheBloomsOnWrite(value);
+    return this;
  }
 
-  /**
-   * @return true if we should evict cached blocks from the blockcache on close
-   */
+  @Override
   public boolean isEvictBlocksOnClose() {
-    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
+    return delegatee.isEvictBlocksOnClose();
  }
 
  /**
@@ -908,14 +558,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
    */
   public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
-    return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
+    getDelegateeForModification().setEvictBlocksOnClose(value);
+    return this;
  }
 
-  /**
-   * @return true if we should prefetch blocks into the blockcache on open
-   */
+  @Override
   public boolean isPrefetchBlocksOnOpen() {
-    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
+    return delegatee.isPrefetchBlocksOnOpen();
  }
 
   /**
@@ -923,7 +572,8 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @return this (for chained invocation)
   */
   public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) {
-    return setValue(PREFETCH_BLOCKS_ON_OPEN, Boolean.toString(value));
+    getDelegateeForModification().setPrefetchBlocksOnOpen(value);
+    return this;
  }
 
   /**
@@ -931,113 +581,23 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   @Override
   public String toString() {
-    StringBuilder s = new StringBuilder();
-
-    s.append('{');
-    s.append(HConstants.NAME);
-    s.append(" => '");
-    s.append(Bytes.toString(name));
-    s.append("'");
-    s.append(getValues(true));
-    s.append('}');
-    return s.toString();
+    return delegatee.toString();
  }
 
   /**
    * @return Column family descriptor with only the customized attributes.
   */
+  @Override
   public String toStringCustomizedValues() {
-    StringBuilder s = new StringBuilder();
-    s.append('{');
-    s.append(HConstants.NAME);
-    s.append(" => '");
-    s.append(Bytes.toString(name));
-    s.append("'");
-    s.append(getValues(false));
-    s.append('}');
-    return s.toString();
-  }
-
-  private StringBuilder getValues(boolean printDefaults) {
-    StringBuilder s = new StringBuilder();
-
-    boolean hasConfigKeys = false;
-
-    // print all reserved keys first
-    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
-      if (!RESERVED_KEYWORDS.contains(entry.getKey())) {
-        hasConfigKeys = true;
-        continue;
-      }
-      String key = Bytes.toString(entry.getKey().get());
-      String value = Bytes.toStringBinary(entry.getValue().get());
-      if (printDefaults
-          || !DEFAULT_VALUES.containsKey(key)
-          || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
-        s.append(", ");
-        s.append(key);
-        s.append(" => ");
-        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
-      }
-    }
-
-    // print all non-reserved, advanced config keys as a separate subset
-    if (hasConfigKeys) {
-      s.append(", ");
-      s.append(HConstants.METADATA).append(" => ");
-      s.append('{');
-      boolean printComma = false;
-      for (Bytes k : values.keySet()) {
-        if (RESERVED_KEYWORDS.contains(k)) {
-          continue;
-        }
-        String key = Bytes.toString(k.get());
-        String value = Bytes.toStringBinary(values.get(k).get());
-        if (printComma) {
-          s.append(", ");
-        }
-        printComma = true;
-        s.append('\'').append(key).append('\'');
-        s.append(" => ");
-        s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\'');
-      }
-      s.append('}');
-    }
-
-    if (!configuration.isEmpty()) {
-      s.append(", ");
-      s.append(HConstants.CONFIGURATION).append(" => ");
-      s.append('{');
-      boolean printCommaForConfiguration = false;
-      for (Map.Entry<String, String> e : configuration.entrySet()) {
-        if (printCommaForConfiguration) s.append(", ");
-        printCommaForConfiguration = true;
-        s.append('\'').append(e.getKey()).append('\'');
-        s.append(" => ");
-        s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\'');
-      }
-      s.append("}");
-    }
-    return s;
+    return delegatee.toStringCustomizedValues();
  }
 
   public static Unit getUnit(String key) {
-    Unit unit;
-    /* TTL for now, we can add more as we need */
-    if (key.equals(HColumnDescriptor.TTL)) {
-      unit = Unit.TIME_INTERVAL;
-    } else if (key.equals(HColumnDescriptor.MOB_THRESHOLD)) {
-      unit = Unit.LONG;
-    } else if (key.equals(HColumnDescriptor.IS_MOB)) {
-      unit = Unit.BOOLEAN;
-    } else {
-      unit = Unit.NONE;
-    }
-    return unit;
+    return ColumnFamilyDescriptorBuilder.getUnit(key);
  }
 
   public static Map<String, String> getDefaultValues() {
-    return Collections.unmodifiableMap(DEFAULT_VALUES);
+    return ColumnFamilyDescriptorBuilder.getDefaultValues();
  }
 
   /**
@@ -1062,33 +622,12 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   @Override
   public int hashCode() {
-    int result = Bytes.hashCode(this.name);
-    result ^= (int) COLUMN_DESCRIPTOR_VERSION;
-    result ^= values.hashCode();
-    result ^= configuration.hashCode();
-    return result;
+    return delegatee.hashCode();
  }
 
-  // Comparable
   @Override
-  public int compareTo(HColumnDescriptor o) {
-    int result = Bytes.compareTo(this.name, o.getName());
-    if (result == 0) {
-      // punt on comparison for ordering, just calculate difference.
-      result = this.values.hashCode() - o.values.hashCode();
-      if (result < 0)
-        result = -1;
-      else if (result > 0)
-        result = 1;
-    }
-    if (result == 0) {
-      result = this.configuration.hashCode() - o.configuration.hashCode();
-      if (result < 0)
-        result = -1;
-      else if (result > 0)
-        result = 1;
-    }
-    return result;
+  public int compareTo(HColumnDescriptor other) {
+    return delegatee.compareTo(other.delegatee);
  }
 
  /**
@@ -1096,8 +635,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @see #parseFrom(byte[])
    */
   public byte[] toByteArray() {
-    return ProtobufUtil
-        .prependPBMagic(ProtobufUtil.convertToColumnFamilySchema(this).toByteArray());
+    return ColumnFamilyDescriptorBuilder.toByteArray(delegatee);
  }
 
  /**
@@ -1107,191 +645,150 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * @see #toByteArray()
   */
   public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
-    if (!ProtobufUtil.isPBMagicPrefix(bytes)) throw new DeserializationException("No magic");
-    int pblen = ProtobufUtil.lengthOfPBMagic();
-    ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
-    ColumnFamilySchema cfs = null;
-    try {
-      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
-      cfs = builder.build();
-    } catch (IOException e) {
-      throw new DeserializationException(e);
+    ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
+    if (desc instanceof ModifyableColumnFamilyDescriptor) {
+      return new HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
+    } else {
+      return new HColumnDescriptor(new ModifyableColumnFamilyDescriptor(desc));
    }
-    return ProtobufUtil.convertToHColumnDesc(cfs);
  }
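
Round-trip sketch, not part of the patch, showing that serialization now flows through the builder while still yielding an HColumnDescriptor on the way back (family name invented):

    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
    byte[] bytes = hcd.toByteArray();             // PB magic + ColumnFamilySchema, produced via ColumnFamilyDescriptorBuilder
    HColumnDescriptor copy = HColumnDescriptor.parseFrom(bytes); // wraps the parsed descriptor in a new facade
    assert Bytes.equals(hcd.getName(), copy.getName());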
 
-  /**
-   * Getter for accessing the configuration value by key.
-   */
+  @Override
   public String getConfigurationValue(String key) {
-    return configuration.get(key);
+    return delegatee.getConfigurationValue(key);
  }
 
-  /**
-   * Getter for fetching an unmodifiable {@link #configuration} map.
-   */
+  @Override
   public Map<String, String> getConfiguration() {
-    // shallow pointer copy
-    return Collections.unmodifiableMap(configuration);
+    return delegatee.getConfiguration();
  }
 
   /**
-   * Setter for storing a configuration setting in {@link #configuration} map.
+   * Setter for storing a configuration setting.
    * @param key Config key. Same as XML config key e.g. hbase.something.or.other.
    * @param value String value. If null, removes the configuration.
    */
   public HColumnDescriptor setConfiguration(String key, String value) {
-    if (value == null) {
-      removeConfiguration(key);
-    } else {
-      configuration.put(key, value);
-    }
+    getDelegateeForModification().setConfiguration(key, value);
     return this;
  }
 
   /**
-   * Remove a configuration setting represented by the key from the {@link #configuration} map.
+   * Remove a configuration setting represented by the key.
    */
-  public void removeConfiguration(final String key) {
-    configuration.remove(key);
+  public HColumnDescriptor removeConfiguration(final String key) {
+    getDelegateeForModification().removeConfiguration(key);
+    return this;
  }
 
-  /**
-   * Return the encryption algorithm in use by this family
-   */
+  @Override
   public String getEncryptionType() {
-    return getValue(ENCRYPTION);
+    return delegatee.getEncryptionType();
  }
 
  /**
    * Set the encryption algorithm for use with this family
-   * @param algorithm
+   * @param value
    */
-  public HColumnDescriptor setEncryptionType(String algorithm) {
-    setValue(ENCRYPTION, algorithm);
+  public HColumnDescriptor setEncryptionType(String value) {
+    getDelegateeForModification().setEncryptionType(value);
     return this;
  }
 
-  /** Return the raw crypto key attribute for the family, or null if not set */
+  @Override
   public byte[] getEncryptionKey() {
-    return getValue(Bytes.toBytes(ENCRYPTION_KEY));
+    return delegatee.getEncryptionKey();
  }
 
   /** Set the raw crypto key attribute for the family */
-  public HColumnDescriptor setEncryptionKey(byte[] keyBytes) {
-    setValue(Bytes.toBytes(ENCRYPTION_KEY), keyBytes);
+  public HColumnDescriptor setEncryptionKey(byte[] value) {
+    getDelegateeForModification().setEncryptionKey(value);
     return this;
  }
 
-  /**
-   * Gets the mob threshold of the family.
-   * If the size of a cell value is larger than this threshold, it's regarded as a mob.
-   * The default threshold is 1024*100(100K)B.
-   * @return The mob threshold.
-   */
+  @Override
   public long getMobThreshold() {
-    byte[] threshold = getValue(MOB_THRESHOLD_BYTES);
-    return threshold != null && threshold.length == Bytes.SIZEOF_LONG ? Bytes.toLong(threshold)
-        : DEFAULT_MOB_THRESHOLD;
+    return delegatee.getMobThreshold();
  }
 
   /**
    * Sets the mob threshold of the family.
-   * @param threshold The mob threshold.
+   * @param value The mob threshold.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMobThreshold(long threshold) {
-    setValue(MOB_THRESHOLD_BYTES, Bytes.toBytes(threshold));
+  public HColumnDescriptor setMobThreshold(long value) {
+    getDelegateeForModification().setMobThreshold(value);
     return this;
  }
 
-  /**
-   * Gets whether the mob is enabled for the family.
-   * @return True if the mob is enabled for the family.
-   */
+  @Override
  public boolean isMobEnabled() {
-    byte[] isMobEnabled = getValue(IS_MOB_BYTES);
-    return isMobEnabled != null && isMobEnabled.length == Bytes.SIZEOF_BOOLEAN
-        && Bytes.toBoolean(isMobEnabled);
+    return delegatee.isMobEnabled();
  }
 
   /**
    * Enables the mob for the family.
-   * @param isMobEnabled Whether to enable the mob for the family.
+   * @param value Whether to enable the mob for the family.
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMobEnabled(boolean isMobEnabled) {
-    setValue(IS_MOB_BYTES, Bytes.toBytes(isMobEnabled));
+  public HColumnDescriptor setMobEnabled(boolean value) {
+    getDelegateeForModification().setMobEnabled(value);
     return this;
  }
 
-  /**
-   * Get the mob compact partition policy for this family
-   * @return MobCompactPartitionPolicy
-   */
+  @Override
   public MobCompactPartitionPolicy getMobCompactPartitionPolicy() {
-    String policy = getValue(MOB_COMPACT_PARTITION_POLICY);
-    if (policy == null) {
-      return DEFAULT_MOB_COMPACT_PARTITION_POLICY;
-    }
-
-    return MobCompactPartitionPolicy.valueOf(policy.toUpperCase(Locale.ROOT));
+    return delegatee.getMobCompactPartitionPolicy();
  }
 
   /**
    * Set the mob compact partition policy for the family.
-   * @param policy policy type
+   * @param value policy type
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) {
-    return setValue(MOB_COMPACT_PARTITION_POLICY, policy.toString().toUpperCase(Locale.ROOT));
+  public HColumnDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy value) {
+    getDelegateeForModification().setMobCompactPartitionPolicy(value);
+    return this;
  }
 
-  /**
-   * @return replication factor set for this CF or {@link #DEFAULT_DFS_REPLICATION} if not set.
-   *         <p>
-   *         {@link #DEFAULT_DFS_REPLICATION} value indicates that user has explicitly not set any
-   *         block replication factor for this CF, hence use the default replication factor set in
-   *         the file system.
-   */
+  @Override
   public short getDFSReplication() {
-    String rf = getValue(DFS_REPLICATION);
-    return rf == null ? DEFAULT_DFS_REPLICATION : Short.valueOf(rf);
+    return delegatee.getDFSReplication();
  }
 
  /**
    * Set the replication factor to hfile(s) belonging to this family
-   * @param replication number of replicas the blocks(s) belonging to this CF should have, or
+   * @param value number of replicas the blocks(s) belonging to this CF should have, or
    *          {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in the
    *          filesystem
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setDFSReplication(short replication) {
-    if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
-      throw new IllegalArgumentException(
-          "DFS replication factor cannot be less than 1 if explicitly set.");
-    }
-    setValue(DFS_REPLICATION, Short.toString(replication));
+  public HColumnDescriptor setDFSReplication(short value) {
+    getDelegateeForModification().setDFSReplication(value);
     return this;
  }
 
-  /**
-   * Return the storage policy in use by this family
-   *         <p>
-   *         Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see
-   *         org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details
-   */
+  @Override
   public String getStoragePolicy() {
-    return getValue(STORAGE_POLICY);
+    return delegatee.getStoragePolicy();
  }
 
   /**
    * Set the storage policy for use with this family
-   * @param policy the policy to set, valid setting includes: "LAZY_PERSIST",
-   *          "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD"
+   * @param value the policy to set, valid setting includes: "LAZY_PERSIST",
+   *          "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD"
    */
-  public HColumnDescriptor setStoragePolicy(String policy) {
-    setValue(STORAGE_POLICY, policy);
+  public HColumnDescriptor setStoragePolicy(String value) {
    getDelegateeForModification().setStoragePolicy(value);
     return this;
  }
+
+  @Override
+  public Bytes getValue(Bytes key) {
+    return delegatee.getValue(key);
+  }
+
+  protected ModifyableColumnFamilyDescriptor getDelegateeForModification() {
+    return delegatee;
+  }
 }
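
With HColumnDescriptor reduced to a deprecated facade over ModifyableColumnFamilyDescriptor, new client code is expected to build descriptors directly. A hedged sketch, assuming the conventional newBuilder(...)/build() surface that the full ColumnFamilyDescriptorBuilder class exposes (family name and settings are illustrative):

    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))  // validates the family name, as isLegalFamilyName used to
        .setMaxVersions(3)
        .setBlocksize(8 * 1024)
        .build();                         // read-only result; no setters leak out

Chained setters on HColumnDescriptor itself keep working unchanged, since every setter now funnels through getDelegateeForModification().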
name as * specified by the parameter column. - * * @param column Column family name * @return Column descriptor for the passed family name or the family on * passed in column. + * @deprecated Use {@link #getColumnFamily(byte[])}. */ - @Override - public HColumnDescriptor getFamily(final byte [] column) { - return delegatee.getFamily(column); + @Deprecated + public HColumnDescriptor getFamily(final byte[] column) { + return toHColumnDescriptor(delegatee.getColumnFamily(column)); } @@ -674,7 +669,24 @@ public class HTableDescriptor implements TableDescriptor, Comparable kvs) throws IOException { - delegatee.addCoprocessor(className, jarFilePath, priority, kvs); + getDelegateeForModification().addCoprocessor(className, jarFilePath, priority, kvs); return this; } @@ -725,7 +737,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable getColumnFamilyNames() { + return delegatee.getColumnFamilyNames(); + } + + @Override + public boolean hasColumnFamily(byte[] name) { + return delegatee.hasColumnFamily(name); + } + + @Override + public ColumnFamilyDescriptor getColumnFamily(byte[] name) { + return delegatee.getColumnFamily(name); + } + + protected ModifyableTableDescriptor getDelegateeForModification() { + return delegatee; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 3d6370564e1..1a3cae21e95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.regex.Pattern; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.ServerName; @@ -270,7 +269,7 @@ public interface AsyncAdmin { * @param columnFamily column family descriptor of column family to be added */ CompletableFuture addColumnFamily(final TableName tableName, - final HColumnDescriptor columnFamily); + final ColumnFamilyDescriptor columnFamily); /** * Delete a column family from a table. @@ -285,7 +284,7 @@ public interface AsyncAdmin { * @param columnFamily new column family descriptor to use */ CompletableFuture modifyColumnFamily(final TableName tableName, - final HColumnDescriptor columnFamily); + final ColumnFamilyDescriptor columnFamily); /** * Create a new namespace. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 987080f78ea..c972b4c1d20 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -40,10 +40,10 @@ import com.google.common.annotations.VisibleForTesting; import io.netty.util.Timeout; import io.netty.util.TimerTask; +import java.util.stream.Stream; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -645,7 +645,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture addColumnFamily(TableName tableName, HColumnDescriptor columnFamily) { + public CompletableFuture addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) { return this. procedureCall( RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), @@ -662,7 +662,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin { @Override public CompletableFuture modifyColumnFamily(TableName tableName, - HColumnDescriptor columnFamily) { + ColumnFamilyDescriptor columnFamily) { return this. procedureCall( RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), @@ -1679,7 +1679,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin { Arrays.asList(tables).forEach( table -> { Map cfs = new HashMap<>(); - Arrays.asList(table.getColumnFamilies()).stream() + Stream.of(table.getColumnFamilies()) .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) .forEach(column -> { cfs.put(column.getNameAsString(), column.getScope()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java new file mode 100644 index 00000000000..68932491efa --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -0,0 +1,216 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.util.Comparator; +import java.util.Map; +import org.apache.hadoop.hbase.KeepDeletedCells; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * A ColumnFamilyDescriptor contains information about a column family such as the + * number of versions, compression settings, etc. + * + * It is used as input when creating a table or adding a column. + */ +@InterfaceAudience.Public +public interface ColumnFamilyDescriptor { + + @InterfaceAudience.Private + static final Comparator<ColumnFamilyDescriptor> COMPARATOR + = (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { + int result = Bytes.compareTo(lhs.getName(), rhs.getName()); + if (result != 0) { + return result; + } + // punt on comparison for ordering, just calculate difference. + result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); + if (result != 0) { + return result; + } + return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); + }; + + /** + * @return The storefile/hfile blocksize for this column family. + */ + int getBlocksize(); + /** + * @return bloom filter type used for new StoreFiles in ColumnFamily + */ + BloomType getBloomFilterType(); + + /** + * @return Compression type setting used for compaction. + */ + Compression.Algorithm getCompactionCompressionType(); + /** + * @return Compression type setting. + */ + Compression.Algorithm getCompressionType(); + /** + * @return an unmodifiable map of the column family configuration. + */ + Map<String, String> getConfiguration(); + /** + * @param key the key whose associated value is to be returned + * @return the configuration value associated with the key. + */ + String getConfigurationValue(String key); + /** + * @return replication factor set for this CF + */ + short getDFSReplication(); + /** + * @return the data block encoding algorithm used in block cache and + * optionally on disk + */ + DataBlockEncoding getDataBlockEncoding(); + /** + * @return The raw crypto key attribute for the family, or null if not set + */ + byte[] getEncryptionKey(); + + /** + * @return The encryption algorithm in use by this family + */ + String getEncryptionType(); + /** + * @return in-memory compaction policy if set for the cf. Returns null if no policy is set + * for this column family + */ + MemoryCompactionPolicy getInMemoryCompaction(); + /** + * @return the KeepDeletedCells setting + */ + KeepDeletedCells getKeepDeletedCells(); + /** + * @return maximum number of versions + */ + int getMaxVersions(); + /** + * @return The minimum number of versions to keep. + */ + int getMinVersions(); + /** + * Get the mob compact partition policy for this family + * @return MobCompactPartitionPolicy + */ + MobCompactPartitionPolicy getMobCompactPartitionPolicy(); + /** + * Gets the mob threshold of the family. + * If the size of a cell value is larger than this threshold, it's regarded as a mob. + * The default threshold is 1024*100 (100KB). + * @return The mob threshold.
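+ * For illustration: with the default threshold a 200KB cell value is + * written as a mob while a 50KB value stays in the ordinary store files + * (assumes mob is enabled for the family; see {@link #isMobEnabled()}).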
+ */ + long getMobThreshold(); + /** + * @return a copy of the name of this column family + */ + byte[] getName(); + + /** + * @return Name of this column family + */ + String getNameAsString(); + + /** + * @return the scope tag + */ + int getScope(); + /** + * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see + * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details. + * @return The storage policy in use by this family + */ + String getStoragePolicy(); + /** + * @return Time-to-live of cell contents, in seconds. + */ + int getTimeToLive(); + /** + * @param key The key. + * @return A cloned value, or null if there is no mapping for the key + */ + Bytes getValue(Bytes key); + /** + * @param key The key. + * @return A cloned value, or null if there is no mapping for the key + */ + byte[] getValue(byte[] key); + /** + * Clones all bytes of all elements. + * @return All values + */ + Map<Bytes, Bytes> getValues(); + /** + * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX + * and BLOOM type blocks). + */ + boolean isBlockCacheEnabled(); + /** + * @return true if we should cache bloomfilter blocks on write + */ + boolean isCacheBloomsOnWrite(); + /** + * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more + * than one tier; e.g. we are using CombinedBlockCache). + */ + boolean isCacheDataInL1(); + /** + * @return true if we should cache data blocks on write + */ + boolean isCacheDataOnWrite(); + /** + * @return true if we should cache index blocks on write + */ + boolean isCacheIndexesOnWrite(); + /** + * @return Whether KV tags should be compressed along with DataBlockEncoding. When no + * DataBlockEncoding is in use, this has no effect. + */ + boolean isCompressTags(); + /** + * @return true if we should evict cached blocks from the blockcache on close + */ + boolean isEvictBlocksOnClose(); + /** + * @return True if we are to favor keeping all values for this column family in the + * HRegionServer cache. + */ + boolean isInMemory(); + /** + * Gets whether the mob is enabled for the family. + * @return True if the mob is enabled for the family. + */ + boolean isMobEnabled(); + /** + * @return true if we should prefetch blocks into the blockcache on open + */ + boolean isPrefetchBlocksOnOpen(); + + /** + * @return Column family descriptor with only the customized attributes. + */ + String toStringCustomizedValues(); +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java new file mode 100644 index 00000000000..bb302dbc856 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -0,0 +1,1347 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import com.google.common.base.Preconditions; +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeepDeletedCells; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PrettyPrinter; +import org.apache.hadoop.hbase.util.PrettyPrinter.Unit; + +@InterfaceAudience.Public +public class ColumnFamilyDescriptorBuilder { + // For future backward compatibility + + // Version 3 was when column names become byte arrays and when we picked up + // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. + // Version 5 was when bloom filter descriptors were removed. + // Version 6 adds metadata as a map where keys and values are byte[]. + // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) + // Version 8 -- reintroduction of bloom filters, changed from boolean to enum + // Version 9 -- add data block encoding + // Version 10 -- change metadata to standard type. + // Version 11 -- add column family level configuration. + private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11; + + @InterfaceAudience.Private + public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; + private static final Bytes IN_MEMORY_COMPACTION_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); + + @InterfaceAudience.Private + public static final String IN_MEMORY = HConstants.IN_MEMORY; + private static final Bytes IN_MEMORY_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY)); + + // These constants are used as FileInfo keys + @InterfaceAudience.Private + public static final String COMPRESSION = "COMPRESSION"; + private static final Bytes COMPRESSION_BYTES = new Bytes(Bytes.toBytes(COMPRESSION)); + @InterfaceAudience.Private + public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; + private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); + @InterfaceAudience.Private + public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING"; + private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); + /** + * Key for the BLOCKCACHE attribute. A more exact name would be + * CACHE_DATA_ON_READ because this flag sets whether or not we cache DATA + * blocks. 
We always cache INDEX and BLOOM blocks; caching these blocks cannot + * be disabled. + */ + @InterfaceAudience.Private + public static final String BLOCKCACHE = "BLOCKCACHE"; + private static final Bytes BLOCKCACHE_BYTES = new Bytes(Bytes.toBytes(BLOCKCACHE)); + @InterfaceAudience.Private + public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; + private static final Bytes CACHE_DATA_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); + @InterfaceAudience.Private + public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; + private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); + @InterfaceAudience.Private + public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; + private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); + @InterfaceAudience.Private + public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; + private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); + /** + * Key for cache data into L1 if cache is set up with more than one tier. To + * set in the shell, do something like this: hbase(main):003:0> create 't', + * {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}} + */ + @InterfaceAudience.Private + public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1"; + private static final Bytes CACHE_DATA_IN_L1_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_IN_L1)); + + /** + * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, + * and DATA blocks of HFiles belonging to this family will be loaded into the + * cache as soon as the file is opened. These loads will not count as cache + * misses. + */ + @InterfaceAudience.Private + public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN"; + private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); + + /** + * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. + * Use smaller block sizes for faster random-access at expense of larger + * indices (more memory consumption). Note that this is a soft limit and that + * blocks have overhead (metadata, CRCs) so blocks will tend to be the size + * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k + * means hbase data will align with an SSDs 4k page accesses (TODO). 
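+ * A shell example, for illustration only (a smaller 8KB block favors random + * reads at the cost of larger indices): + * hbase(main):002:0> create 't', {NAME => 'f', BLOCKSIZE => '8192'}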
+ */ + @InterfaceAudience.Private + public static final String BLOCKSIZE = "BLOCKSIZE"; + private static final Bytes BLOCKSIZE_BYTES = new Bytes(Bytes.toBytes(BLOCKSIZE)); + + @InterfaceAudience.Private + public static final String TTL = "TTL"; + private static final Bytes TTL_BYTES = new Bytes(Bytes.toBytes(TTL)); + @InterfaceAudience.Private + public static final String BLOOMFILTER = "BLOOMFILTER"; + private static final Bytes BLOOMFILTER_BYTES = new Bytes(Bytes.toBytes(BLOOMFILTER)); + @InterfaceAudience.Private + public static final String REPLICATION_SCOPE = "REPLICATION_SCOPE"; + private static final Bytes REPLICATION_SCOPE_BYTES = new Bytes(Bytes.toBytes(REPLICATION_SCOPE)); + @InterfaceAudience.Private + public static final String MAX_VERSIONS = HConstants.VERSIONS; + private static final Bytes MAX_VERSIONS_BYTES = new Bytes(Bytes.toBytes(MAX_VERSIONS)); + @InterfaceAudience.Private + public static final String MIN_VERSIONS = "MIN_VERSIONS"; + private static final Bytes MIN_VERSIONS_BYTES = new Bytes(Bytes.toBytes(MIN_VERSIONS)); + /** + * Retain all cells across flushes and compactions even if they fall behind a + * delete tombstone. To see all retained cells, do a 'raw' scan; see + * Scan#setRaw or pass RAW => true attribute in the shell. + */ + @InterfaceAudience.Private + public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; + private static final Bytes KEEP_DELETED_CELLS_BYTES = new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); + @InterfaceAudience.Private + public static final String COMPRESS_TAGS = "COMPRESS_TAGS"; + private static final Bytes COMPRESS_TAGS_BYTES = new Bytes(Bytes.toBytes(COMPRESS_TAGS)); + @InterfaceAudience.Private + public static final String ENCRYPTION = "ENCRYPTION"; + private static final Bytes ENCRYPTION_BYTES = new Bytes(Bytes.toBytes(ENCRYPTION)); + @InterfaceAudience.Private + public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY"; + private static final Bytes ENCRYPTION_KEY_BYTES = new Bytes(Bytes.toBytes(ENCRYPTION_KEY)); + + private static final boolean DEFAULT_MOB = false; + @InterfaceAudience.Private + public static final String IS_MOB = "IS_MOB"; + private static final Bytes IS_MOB_BYTES = new Bytes(Bytes.toBytes(IS_MOB)); + @InterfaceAudience.Private + public static final String MOB_THRESHOLD = "MOB_THRESHOLD"; + private static final Bytes MOB_THRESHOLD_BYTES = new Bytes(Bytes.toBytes(MOB_THRESHOLD)); + public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k + @InterfaceAudience.Private + public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY"; + private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); + public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY + = MobCompactPartitionPolicy.DAILY; + @InterfaceAudience.Private + public static final String DFS_REPLICATION = "DFS_REPLICATION"; + private static final Bytes DFS_REPLICATION_BYTES = new Bytes(Bytes.toBytes(DFS_REPLICATION)); + public static final short DEFAULT_DFS_REPLICATION = 0; + @InterfaceAudience.Private + public static final String STORAGE_POLICY = "STORAGE_POLICY"; + private static final Bytes STORAGE_POLICY_BYTES = new Bytes(Bytes.toBytes(STORAGE_POLICY)); + + /** + * Default compression type. + */ + public static final Compression.Algorithm DEFAULT_COMPRESSION = Compression.Algorithm.NONE; + + /** + * Default data block encoding algorithm. 
+ */ + public static final DataBlockEncoding DEFAULT_DATA_BLOCK_ENCODING = DataBlockEncoding.NONE; + + /** + * Default number of versions of a record to keep. + */ + public static final int DEFAULT_MAX_VERSIONS = 1; + + /** + * Default is not to keep a minimum of versions. + */ + public static final int DEFAULT_MIN_VERSIONS = 0; + + /** + * Default setting for whether to try and serve this column family from memory + * or not. + */ + public static final boolean DEFAULT_IN_MEMORY = false; + + /** + * Default setting for preventing deleted cells from being collected immediately. + */ + public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE; + + /** + * Default setting for whether to use a block cache or not. + */ + public static final boolean DEFAULT_BLOCKCACHE = true; + + /** + * Default setting for whether to cache data blocks on write if block caching + * is enabled. + */ + public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; + + /** + * Default setting for whether to cache data blocks in L1 tier. Only makes + * sense if more than one tier is in operation, i.e. if we have both an L1 and + * an L2. This will be the case if we are using BucketCache. + */ + public static final boolean DEFAULT_CACHE_DATA_IN_L1 = false; + + /** + * Default setting for whether to cache index blocks on write if block caching + * is enabled. + */ + public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; + + /** + * Default size of blocks in files stored to the filesystem (hfiles). + */ + public static final int DEFAULT_BLOCKSIZE = HConstants.DEFAULT_BLOCKSIZE; + + /** + * Default setting for whether or not to use bloomfilters. + */ + public static final BloomType DEFAULT_BLOOMFILTER = BloomType.ROW; + + /** + * Default setting for whether to cache bloom filter blocks on write if block + * caching is enabled. + */ + public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; + + /** + * Default time to live of cell contents. + */ + public static final int DEFAULT_TTL = HConstants.FOREVER; + + /** + * Default scope. + */ + public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; + + /** + * Default setting for whether to evict cached blocks from the blockcache on + * close. + */ + public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; + + /** + * Default compress tags along with any type of DataBlockEncoding. + */ + public static final boolean DEFAULT_COMPRESS_TAGS = true; + + /* + * Default setting for whether to prefetch blocks into the blockcache on open.
+ */ + public static final boolean DEFAULT_PREFETCH_BLOCKS_ON_OPEN = false; + + private final static Map DEFAULT_VALUES = new HashMap<>(); + + private static Map getDefaultValuesBytes() { + Map values = new HashMap<>(); + DEFAULT_VALUES.forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); + return values; + } + + public static Map getDefaultValues() { + return Collections.unmodifiableMap(DEFAULT_VALUES); + } + + private final static Set RESERVED_KEYWORDS = new HashSet<>(); + + static { + DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER.name()); + DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE)); + DEFAULT_VALUES.put(MAX_VERSIONS, String.valueOf(DEFAULT_MAX_VERSIONS)); + DEFAULT_VALUES.put(MIN_VERSIONS, String.valueOf(DEFAULT_MIN_VERSIONS)); + DEFAULT_VALUES.put(COMPRESSION, DEFAULT_COMPRESSION.name()); + DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); + DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); + DEFAULT_VALUES.put(IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); + DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); + DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); + DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); + DEFAULT_VALUES.put(CACHE_DATA_ON_WRITE, String.valueOf(DEFAULT_CACHE_DATA_ON_WRITE)); + DEFAULT_VALUES.put(CACHE_DATA_IN_L1, String.valueOf(DEFAULT_CACHE_DATA_IN_L1)); + DEFAULT_VALUES.put(CACHE_INDEX_ON_WRITE, String.valueOf(DEFAULT_CACHE_INDEX_ON_WRITE)); + DEFAULT_VALUES.put(CACHE_BLOOMS_ON_WRITE, String.valueOf(DEFAULT_CACHE_BLOOMS_ON_WRITE)); + DEFAULT_VALUES.put(EVICT_BLOCKS_ON_CLOSE, String.valueOf(DEFAULT_EVICT_BLOCKS_ON_CLOSE)); + DEFAULT_VALUES.put(PREFETCH_BLOCKS_ON_OPEN, String.valueOf(DEFAULT_PREFETCH_BLOCKS_ON_OPEN)); + DEFAULT_VALUES.keySet().forEach(s -> RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s)))); + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION))); + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY))); + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(IS_MOB))); + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(MOB_THRESHOLD))); + RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY))); + } + + public static Unit getUnit(String key) { + /* TTL for now, we can add more as we need */ + switch (key) { + case TTL: + return Unit.TIME_INTERVAL; + default: + return Unit.NONE; + } + } + + /** + * @param b Family name. + * @return b + * @throws IllegalArgumentException If not null and not a legitimate family + * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because + * b can be null when deserializing). Cannot start with a '.' + * either. Also Family can not be an empty value or equal "recovered.edits". + */ + public static byte[] isLegalColumnFamilyName(final byte[] b) { + if (b == null) { + return null; + } + Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty"); + if (b[0] == '.') { + throw new IllegalArgumentException("Column Family names cannot start with a " + + "period: " + Bytes.toString(b)); + } + for (int i = 0; i < b.length; i++) { + if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { + throw new IllegalArgumentException("Illegal character <" + b[i] + + ">. 
Column Family names cannot contain control characters or colons: " + + Bytes.toString(b)); + } + } + byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR); + if (Bytes.equals(recoveredEdit, b)) { + throw new IllegalArgumentException("Column Family name cannot be: " + + HConstants.RECOVERED_EDITS_DIR); + } + return b; + } + + private final ModifyableColumnFamilyDescriptor desc; + + public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) throws DeserializationException { + return ModifyableColumnFamilyDescriptor.parseFrom(pbBytes); + } + + public static ColumnFamilyDescriptorBuilder newBuilder(final byte[] name) { + return new ColumnFamilyDescriptorBuilder(name); + } + + public static ColumnFamilyDescriptorBuilder newBuilder(final ColumnFamilyDescriptor desc) { + return new ColumnFamilyDescriptorBuilder(desc); + } + + public static ColumnFamilyDescriptor copy(ColumnFamilyDescriptor desc) { + return new ModifyableColumnFamilyDescriptor(desc); + } + + private ColumnFamilyDescriptorBuilder(final byte[] name) { + this.desc = new ModifyableColumnFamilyDescriptor(name); + } + + private ColumnFamilyDescriptorBuilder(final ColumnFamilyDescriptor desc) { + this.desc = new ModifyableColumnFamilyDescriptor(desc); + } + + /** + * @param desc The table descriptor to serialize + * @return This instance serialized with pb with pb magic prefix + */ + public static byte[] toByteArray(ColumnFamilyDescriptor desc) { + if (desc instanceof ModifyableColumnFamilyDescriptor) { + return ((ModifyableColumnFamilyDescriptor) desc).toByteArray(); + } + return new ModifyableColumnFamilyDescriptor(desc).toByteArray(); + } + + public ColumnFamilyDescriptor build() { + return new ModifyableColumnFamilyDescriptor(desc); + } + + public ColumnFamilyDescriptorBuilder removeConfiguration(String key) { + desc.removeConfiguration(key); + return this; + } + + public ColumnFamilyDescriptorBuilder setBlockCacheEnabled(boolean value) { + desc.setBlockCacheEnabled(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setBlocksize(int value) { + desc.setBlocksize(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setBloomFilterType(final BloomType value) { + desc.setBloomFilterType(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCacheBloomsOnWrite(boolean value) { + desc.setCacheBloomsOnWrite(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCacheDataInL1(boolean value) { + desc.setCacheDataInL1(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCacheDataOnWrite(boolean value) { + desc.setCacheDataOnWrite(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCacheIndexesOnWrite(final boolean value) { + desc.setCacheIndexesOnWrite(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCompactionCompressionType(Compression.Algorithm value) { + desc.setCompactionCompressionType(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCompressTags(boolean value) { + desc.setCompressTags(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setCompressionType(Compression.Algorithm value) { + desc.setCompressionType(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setConfiguration(final String key, final String value) { + desc.setConfiguration(key, value); + return this; + } + + public ColumnFamilyDescriptorBuilder setDFSReplication(short value) { + desc.setDFSReplication(value); + return this; + } + + public ColumnFamilyDescriptorBuilder 
setDataBlockEncoding(DataBlockEncoding value) { + desc.setDataBlockEncoding(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setEncryptionKey(final byte[] value) { + desc.setEncryptionKey(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setEncryptionType(String value) { + desc.setEncryptionType(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setEvictBlocksOnClose(boolean value) { + desc.setEvictBlocksOnClose(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setInMemory(final boolean value) { + desc.setInMemory(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setInMemoryCompaction(final MemoryCompactionPolicy value) { + desc.setInMemoryCompaction(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setKeepDeletedCells(KeepDeletedCells value) { + desc.setKeepDeletedCells(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setMaxVersions(final int value) { + desc.setMaxVersions(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setMinVersions(final int value) { + desc.setMinVersions(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { + desc.setMobCompactPartitionPolicy(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setMobEnabled(final boolean value) { + desc.setMobEnabled(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setMobThreshold(final long value) { + desc.setMobThreshold(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setPrefetchBlocksOnOpen(final boolean value) { + desc.setPrefetchBlocksOnOpen(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setScope(final int value) { + desc.setScope(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setStoragePolicy(final String value) { + desc.setStoragePolicy(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setTimeToLive(final int value) { + desc.setTimeToLive(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setTimeToLive(final String value) throws HBaseException { + desc.setTimeToLive(value); + return this; + } + + public ColumnFamilyDescriptorBuilder setValue(final Bytes key, final Bytes value) { + desc.setValue(key, value); + return this; + } + + public ColumnFamilyDescriptorBuilder setValue(final byte[] key, final byte[] value) { + desc.setValue(key, value); + return this; + } + + /** + * An ModifyableFamilyDescriptor contains information about a column family such as the + * number of versions, compression settings, etc. + * + * It is used as input when creating a table or adding a column. + * TODO: make this package-private after removing the HColumnDescriptor + */ + @InterfaceAudience.Private + public static class ModifyableColumnFamilyDescriptor + implements ColumnFamilyDescriptor, Comparable { + + // Column family name + private final byte[] name; + + // Column metadata + private final Map values = new HashMap<>(); + + /** + * A map which holds the configuration specific to the column family. The + * keys of the map have the same names as config keys and override the + * defaults with cf-specific settings. Example usage may be for compactions, + * etc. + */ + private final Map configuration = new HashMap<>(); + + /** + * Construct a column descriptor specifying only the family name The other + * attributes are defaulted. + * + * @param name Column family name. 
Must be 'printable' -- digit or + * letter -- and may not contain a : + * TODO: make this private after the HCD is removed. + */ + @InterfaceAudience.Private + public ModifyableColumnFamilyDescriptor(final byte[] name) { + this(isLegalColumnFamilyName(name), getDefaultValuesBytes(), Collections.EMPTY_MAP); + } + + /** + * Constructor. Makes a deep copy of the supplied descriptor. + * TODO: make this private after the HCD is removed. + * @param desc The descriptor. + */ + @InterfaceAudience.Private + public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) { + this(desc.getName(), desc.getValues(), desc.getConfiguration()); + } + + private ModifyableColumnFamilyDescriptor(byte[] name, Map<Bytes, Bytes> values, Map<String, String> config) { + this.name = name; + this.values.putAll(values); + this.configuration.putAll(config); + } + + @Override + public byte[] getName() { + return Bytes.copy(name); + } + + @Override + public String getNameAsString() { + return Bytes.toString(name); + } + + @Override + public Bytes getValue(Bytes key) { + return values.get(key); + } + + @Override + public byte[] getValue(byte[] key) { + Bytes value = values.get(new Bytes(key)); + return value == null ? null : value.get(); + } + + @Override + public Map<Bytes, Bytes> getValues() { + return Collections.unmodifiableMap(values); + } + + /** + * @param key The key. + * @param value The value. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setValue(byte[] key, byte[] value) { + return setValue(toBytesOrNull(key, Function.identity()), toBytesOrNull(value, Function.identity())); + } + + public ModifyableColumnFamilyDescriptor setValue(String key, String value) { + return setValue(toBytesOrNull(key, Bytes::toBytes), toBytesOrNull(value, Bytes::toBytes)); + } + + private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { + return setValue(key, toBytesOrNull(value, Bytes::toBytes)); + } + /** + * @param key The key. + * @param value The value. + * @return this (for chained invocation) + */ + private ModifyableColumnFamilyDescriptor setValue(Bytes key, Bytes value) { + if (value == null) { + values.remove(key); + } else { + values.put(key, value); + } + return this; + } + + /** + * + * @param key Key whose key and value we're to remove from HCD parameters. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor removeValue(final Bytes key) { + return setValue(key, (Bytes) null); + } + + private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) { + if (t == null) { + return null; + } else { + return new Bytes(f.apply(t)); + } + } + + private <T> T getStringOrDefault(Bytes key, Function<String, T> function, T defaultValue) { + return getOrDefault(key, b -> function.apply(Bytes.toString(b)), defaultValue); + } + + private <T> T getOrDefault(Bytes key, Function<byte[], T> function, T defaultValue) { + Bytes value = values.get(key); + if (value == null) { + return defaultValue; + } else { + return function.apply(value.get()); + } + } + + @Override + public int getMaxVersions() { + return getStringOrDefault(MAX_VERSIONS_BYTES, Integer::parseInt, DEFAULT_MAX_VERSIONS); + } + + /** + * @param maxVersions maximum number of versions + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { + if (maxVersions <= 0) { + // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions". + // Until there is support, consider 0 or < 0 -- a configuration error.
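+ // For example, setMaxVersions(0) or setMaxVersions(-1) is rejected below + // rather than being silently treated as "keep all versions".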
+ throw new IllegalArgumentException("Maximum versions must be positive"); + } + if (maxVersions < this.getMinVersions()) { + throw new IllegalArgumentException("Set MaxVersion to " + maxVersions + + " while minVersion is " + this.getMinVersions() + + ". Maximum versions must be >= minimum versions "); + } + setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions)); + return this; + } + + /** + * Set minimum and maximum versions to keep + * + * @param minVersions minimal number of versions + * @param maxVersions maximum number of versions + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVersions) { + if (minVersions <= 0) { + // TODO: Allow minVersion and maxVersion of 0 to be the way you say "Keep all versions". + // Until there is support, consider 0 or < 0 -- a configuration error. + throw new IllegalArgumentException("Minimum versions must be positive"); + } + + if (maxVersions < minVersions) { + throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions + + " and set MinVersion to " + minVersions + + ", as maximum versions must be >= minimum versions."); + } + setMinVersions(minVersions); + setMaxVersions(maxVersions); + return this; + } + + + @Override + public int getBlocksize() { + return getStringOrDefault(BLOCKSIZE_BYTES, Integer::valueOf, DEFAULT_BLOCKSIZE); + } + + /** + * @param s Blocksize to use when writing out storefiles/hfiles on this + * column family. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setBlocksize(int s) { + return setValue(BLOCKSIZE_BYTES, Integer.toString(s)); + } + + @Override + public Compression.Algorithm getCompressionType() { + return getStringOrDefault(COMPRESSION_BYTES, + Compression.Algorithm::valueOf, + DEFAULT_COMPRESSION); + } + + /** + * Compression types supported in hbase. LZO is not bundled as part of the + * hbase distribution. See + * LZO + * Compression + * for how to enable it. + * + * @param type Compression type setting. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) { + return setValue(COMPRESSION_BYTES, type.name()); + } + + @Override + public DataBlockEncoding getDataBlockEncoding() { + return getStringOrDefault(DATA_BLOCK_ENCODING_BYTES, + DataBlockEncoding::valueOf, + DataBlockEncoding.NONE); + } + + /** + * Set data block encoding algorithm used in block cache. + * + * @param type What kind of data block encoding will be used. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setDataBlockEncoding(DataBlockEncoding type) { + return setValue(DATA_BLOCK_ENCODING_BYTES, type == null ? DataBlockEncoding.NONE.name() : type.name()); + } + + /** + * Set whether the tags should be compressed along with DataBlockEncoding. + * When no DataBlockEncoding is been used, this is having no effect. 
+ * + * @param compressTags + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) { + return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags)); + } + + @Override + public boolean isCompressTags() { + return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, + DEFAULT_COMPRESS_TAGS); + } + + @Override + public Compression.Algorithm getCompactionCompressionType() { + return getStringOrDefault(COMPRESSION_COMPACT_BYTES, + Compression.Algorithm::valueOf, + getCompressionType()); + } + + /** + * Compression types supported in hbase. LZO is not bundled as part of the + * hbase distribution. See + * LZO + * Compression + * for how to enable it. + * + * @param type Compression type setting. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCompactionCompressionType( + Compression.Algorithm type) { + return setValue(COMPRESSION_COMPACT_BYTES, type.name()); + } + + @Override + public boolean isInMemory() { + return getStringOrDefault(IN_MEMORY_BYTES, Boolean::valueOf, DEFAULT_IN_MEMORY); + } + + /** + * @param inMemory True if we are to favor keeping all values for this + * column family in the HRegionServer cache + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) { + return setValue(IN_MEMORY_BYTES, Boolean.toString(inMemory)); + } + + @Override + public MemoryCompactionPolicy getInMemoryCompaction() { + return getStringOrDefault(IN_MEMORY_COMPACTION_BYTES, MemoryCompactionPolicy::valueOf, null); + } + + /** + * @param inMemoryCompaction the prefered in-memory compaction policy for + * this column family + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { + return setValue(IN_MEMORY_COMPACTION_BYTES, inMemoryCompaction.name()); + } + + @Override + public KeepDeletedCells getKeepDeletedCells() { + return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, KeepDeletedCells::valueOf, DEFAULT_KEEP_DELETED); + } + + /** + * @param keepDeletedCells True if deleted rows should not be collected + * immediately. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) { + return setValue(KEEP_DELETED_CELLS_BYTES, keepDeletedCells.name()); + } + + @Override + public int getTimeToLive() { + return getStringOrDefault(TTL_BYTES, Integer::parseInt, DEFAULT_TTL); + } + + /** + * @param timeToLive Time-to-live of cell contents, in seconds. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setTimeToLive(int timeToLive) { + return setValue(TTL_BYTES, Integer.toString(timeToLive)); + } + + /** + * @param timeToLive Time-to-live of cell contents, in seconds. + * @return this (for chained invocation) + * @throws org.apache.hadoop.hbase.exceptions.HBaseException + */ + public ModifyableColumnFamilyDescriptor setTimeToLive(String timeToLive) throws HBaseException { + return setTimeToLive(Integer.parseInt(PrettyPrinter.valueOf(timeToLive, Unit.TIME_INTERVAL))); + } + + @Override + public int getMinVersions() { + return getStringOrDefault(MIN_VERSIONS_BYTES, Integer::valueOf, DEFAULT_MIN_VERSIONS); + } + + /** + * @param minVersions The minimum number of versions to keep. 
(used when + * timeToLive is set) + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { + return setValue(MIN_VERSIONS_BYTES, Integer.toString(minVersions)); + } + + @Override + public boolean isBlockCacheEnabled() { + return getStringOrDefault(BLOCKCACHE_BYTES, Boolean::valueOf, DEFAULT_BLOCKCACHE); + } + + /** + * @param blockCacheEnabled True if hfile DATA type blocks should be cached + * (We always cache INDEX and BLOOM blocks; you cannot turn this off). + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { + return setValue(BLOCKCACHE_BYTES, Boolean.toString(blockCacheEnabled)); + } + + @Override + public BloomType getBloomFilterType() { + return getStringOrDefault(BLOOMFILTER_BYTES, BloomType::valueOf, DEFAULT_BLOOMFILTER); + } + + public ModifyableColumnFamilyDescriptor setBloomFilterType(final BloomType bt) { + return setValue(BLOOMFILTER_BYTES, bt.name()); + } + + @Override + public int getScope() { + return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, DEFAULT_REPLICATION_SCOPE); + } + + /** + * @param scope the scope tag + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setScope(int scope) { + return setValue(REPLICATION_SCOPE_BYTES, Integer.toString(scope)); + } + + @Override + public boolean isCacheDataOnWrite() { + return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_ON_WRITE); + } + + /** + * @param value true if we should cache data blocks on write + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCacheDataOnWrite(boolean value) { + return setValue(CACHE_DATA_ON_WRITE_BYTES, Boolean.toString(value)); + } + + @Override + public boolean isCacheDataInL1() { + return getStringOrDefault(CACHE_DATA_IN_L1_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_IN_L1); + } + + /** + * @param value true if we should cache data blocks in the L1 cache (if + * block cache deploy has more than one tier; e.g. we are using + * CombinedBlockCache). 
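+ * For example, a deploy with a small on-heap L1 plus a BucketCache-backed + * L2 might enable this for a hot, frequently read family (illustrative).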
+ * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCacheDataInL1(boolean value) { + return setValue(CACHE_DATA_IN_L1_BYTES, Boolean.toString(value)); + } + + @Override + public boolean isCacheIndexesOnWrite() { + return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); + } + + /** + * @param value true if we should cache index blocks on write + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCacheIndexesOnWrite(boolean value) { + return setValue(CACHE_INDEX_ON_WRITE_BYTES, Boolean.toString(value)); + } + + @Override + public boolean isCacheBloomsOnWrite() { + return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_BLOOMS_ON_WRITE); + } + + /** + * @param value true if we should cache bloomfilter blocks on write + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setCacheBloomsOnWrite(boolean value) { + return setValue(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean.toString(value)); + } + + @Override + public boolean isEvictBlocksOnClose() { + return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, DEFAULT_EVICT_BLOCKS_ON_CLOSE); + } + + /** + * @param value true if we should evict cached blocks from the blockcache on + * close + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { + return setValue(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean.toString(value)); + } + + @Override + public boolean isPrefetchBlocksOnOpen() { + return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, DEFAULT_PREFETCH_BLOCKS_ON_OPEN); + } + + /** + * @param value true if we should prefetch blocks into the blockcache on + * open + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) { + return setValue(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean.toString(value)); + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(getNameAsString()); + s.append("'"); + s.append(getValues(true)); + s.append('}'); + return s.toString(); + } + + + @Override + public String toStringCustomizedValues() { + StringBuilder s = new StringBuilder(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(getNameAsString()); + s.append("'"); + s.append(getValues(false)); + s.append('}'); + return s.toString(); + } + + private StringBuilder getValues(boolean printDefaults) { + StringBuilder s = new StringBuilder(); + + boolean hasConfigKeys = false; + + // print all reserved keys first + for (Map.Entry entry : values.entrySet()) { + if (!RESERVED_KEYWORDS.contains(entry.getKey())) { + hasConfigKeys = true; + continue; + } + String key = Bytes.toString(entry.getKey().get()); + String value = Bytes.toStringBinary(entry.getValue().get()); + if (printDefaults + || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + s.append(", "); + s.append(key); + s.append(" => "); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); + } + } + + // print all non-reserved, advanced config keys as a separate subset + if (hasConfigKeys) { + s.append(", "); + s.append(HConstants.METADATA).append(" => "); + s.append('{'); + boolean printComma = false; + for (Bytes k : values.keySet()) { + if 
(RESERVED_KEYWORDS.contains(k)) { + continue; + } + String key = Bytes.toString(k.get()); + String value = Bytes.toStringBinary(values.get(k).get()); + if (printComma) { + s.append(", "); + } + printComma = true; + s.append('\'').append(key).append('\''); + s.append(" => "); + s.append('\'').append(PrettyPrinter.format(value, getUnit(key))).append('\''); + } + s.append('}'); + } + + if (!configuration.isEmpty()) { + s.append(", "); + s.append(HConstants.CONFIGURATION).append(" => "); + s.append('{'); + boolean printCommaForConfiguration = false; + for (Map.Entry e : configuration.entrySet()) { + if (printCommaForConfiguration) { + s.append(", "); + } + printCommaForConfiguration = true; + s.append('\'').append(e.getKey()).append('\''); + s.append(" => "); + s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\''); + } + s.append("}"); + } + return s; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof ModifyableColumnFamilyDescriptor)) { + return false; + } + return compareTo((ModifyableColumnFamilyDescriptor) obj) == 0; + } + + @Override + public int hashCode() { + int result = Bytes.hashCode(name); + result ^= (int) COLUMN_DESCRIPTOR_VERSION; + result ^= values.hashCode(); + result ^= configuration.hashCode(); + return result; + } + + @Override + public int compareTo(ModifyableColumnFamilyDescriptor other) { + return COMPARATOR.compare(this, other); + } + + /** + * @return This instance serialized with pb with pb magic prefix + * @see #parseFrom(byte[]) + */ + private byte[] toByteArray() { + return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToColumnFamilySchema(this) + .toByteArray()); + } + + /** + * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb + * magic prefix + * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from + * bytes + * @throws DeserializationException + * @see #toByteArray() + */ + private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { + if (!ProtobufUtil.isPBMagicPrefix(bytes)) { + throw new DeserializationException("No magic"); + } + int pblen = ProtobufUtil.lengthOfPBMagic(); + ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder(); + ColumnFamilySchema cfs = null; + try { + ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); + cfs = builder.build(); + } catch (IOException e) { + throw new DeserializationException(e); + } + return ProtobufUtil.convertToColumnDesc(cfs); + } + + @Override + public String getConfigurationValue(String key) { + return configuration.get(key); + } + + @Override + public Map getConfiguration() { + // shallow pointer copy + return Collections.unmodifiableMap(configuration); + } + + /** + * Setter for storing a configuration setting in {@link #configuration} map. + * + * @param key Config key. Same as XML config key e.g. + * hbase.something.or.other. + * @param value String value. If null, removes the configuration. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setConfiguration(String key, String value) { + if (value == null) { + configuration.remove(key); + } else { + configuration.put(key, value); + } + return this; + } + + /** + * Remove a configuration setting represented by the key from the + * {@link #configuration} map. 
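+ * For example, removeConfiguration("hbase.hstore.blockingStoreFiles") clears + * a per-family override so the table- or site-level value applies again + * (key chosen for illustration).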
+ * + * @param key + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) { + return setConfiguration(key, null); + } + + @Override + public String getEncryptionType() { + return getStringOrDefault(ENCRYPTION_BYTES, Function.identity(), null); + } + + /** + * Set the encryption algorithm for use with this family + * + * @param algorithm + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) { + return setValue(ENCRYPTION_BYTES, algorithm); + } + + @Override + public byte[] getEncryptionKey() { + return getOrDefault(ENCRYPTION_KEY_BYTES, Bytes::copy, null); + } + + /** + * Set the raw crypto key attribute for the family + * + * @param keyBytes + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) { + return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes)); + } + + @Override + public long getMobThreshold() { + return getStringOrDefault(MOB_THRESHOLD_BYTES, Long::valueOf, DEFAULT_MOB_THRESHOLD); + } + + /** + * Sets the mob threshold of the family. + * + * @param threshold The mob threshold. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setMobThreshold(long threshold) { + return setValue(MOB_THRESHOLD_BYTES, String.valueOf(threshold)); + } + + @Override + public boolean isMobEnabled() { + return getStringOrDefault(IS_MOB_BYTES, Boolean::valueOf, DEFAULT_MOB); + } + + /** + * Enables the mob for the family. + * + * @param isMobEnabled Whether to enable the mob for the family. + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setMobEnabled(boolean isMobEnabled) { + return setValue(IS_MOB_BYTES, String.valueOf(isMobEnabled)); + } + + @Override + public MobCompactPartitionPolicy getMobCompactPartitionPolicy() { + return getStringOrDefault(MOB_COMPACT_PARTITION_POLICY_BYTES, + MobCompactPartitionPolicy::valueOf, DEFAULT_MOB_COMPACT_PARTITION_POLICY); + } + + /** + * Set the mob compact partition policy for the family. 
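+ * For example, passing MobCompactPartitionPolicy.WEEKLY groups a week's mob + * files into one compaction partition; the default is DAILY + * (see DEFAULT_MOB_COMPACT_PARTITION_POLICY).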
+ * + * @param policy policy type + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { + return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name()); + } + + @Override + public short getDFSReplication() { + return getStringOrDefault(DFS_REPLICATION_BYTES, + Short::valueOf, DEFAULT_DFS_REPLICATION); + } + + /** + * Set the replication factor to hfile(s) belonging to this family + * + * @param replication number of replicas the blocks(s) belonging to this CF + * should have, or {@link #DEFAULT_DFS_REPLICATION} for the default + * replication factor set in the filesystem + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setDFSReplication(short replication) { + if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) { + throw new IllegalArgumentException( + "DFS replication factor cannot be less than 1 if explicitly set."); + } + return setValue(DFS_REPLICATION_BYTES, Short.toString(replication)); + } + + @Override + public String getStoragePolicy() { + return getStringOrDefault(STORAGE_POLICY_BYTES, Function.identity(), null); + } + + /** + * Set the storage policy for use with this family + * + * @param policy the policy to set, valid setting includes: + * "LAZY_PERSIST", + * "ALL_SSD", "ONE_SSD", "HOT", "WARM", + * "COLD" + * @return this (for chained invocation) + */ + public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) { + return setValue(STORAGE_POLICY_BYTES, policy); + } + + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index d9bd75e3492..1c6ea03b0c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -29,7 +29,6 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.Callable; @@ -4166,7 +4165,7 @@ public class HBaseAdmin implements Admin { * @throws IOException if a remote or network exception occurs */ private void setTableRep(final TableName tableName, boolean enableRep) throws IOException { - HTableDescriptor htd = getTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(getTableDescriptor(tableName)); ReplicationState currentReplicationState = getTableReplicationState(htd); if (enableRep && currentReplicationState != ReplicationState.ENABLED || !enableRep && currentReplicationState != ReplicationState.DISABLED) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java new file mode 100644 index 00000000000..c8d34ff957d --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
+
+/**
+ * Read-only column descriptor.
+ */
+@Deprecated // deprecated for hbase 2.0, remove for hbase 3.0. see HColumnDescriptor.
+@InterfaceAudience.Private
+public class ImmutableHColumnDescriptor extends HColumnDescriptor {
+  /*
+   * Create an unmodifiable copy of an HColumnDescriptor
+   * @param desc the descriptor to copy
+   */
+  ImmutableHColumnDescriptor(final HColumnDescriptor desc) {
+    super(desc, false);
+  }
+
+  ImmutableHColumnDescriptor(final ModifyableColumnFamilyDescriptor desc) {
+    super(desc);
+  }
+
+  @Override
+  protected ModifyableColumnFamilyDescriptor getDelegateeForModification() {
+    throw new UnsupportedOperationException("HColumnDescriptor is read-only");
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
index 89d1291330f..4e9e9afc3cf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
@@ -18,11 +18,11 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Read-only table descriptor.
@@ -31,49 +31,28 @@ import org.apache.hadoop.hbase.util.Bytes;
 @InterfaceAudience.Public
 public class ImmutableHTableDescriptor extends HTableDescriptor {
 
+  @Override
+  protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc) {
+    if (desc == null) {
+      return null;
+    } else if (desc instanceof ModifyableColumnFamilyDescriptor) {
+      return new ImmutableHColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
+    } else if (desc instanceof HColumnDescriptor) {
+      return new ImmutableHColumnDescriptor((HColumnDescriptor) desc);
+    } else {
+      return new ImmutableHColumnDescriptor(new ModifyableColumnFamilyDescriptor(desc));
+    }
+  }
   /*
    * Create an unmodifyable copy of an HTableDescriptor
    * @param desc
    */
   public ImmutableHTableDescriptor(final HTableDescriptor desc) {
-    super(new UnmodifyableTableDescriptor(desc));
+    super(desc, false);
   }
 
-  @Deprecated // deprecated for hbase 2.0, remove for hbase 3.0. see HTableDescriptor.
-  private static class UnmodifyableTableDescriptor extends ModifyableTableDescriptor {
-
-    UnmodifyableTableDescriptor(final TableDescriptor desc) {
-      super(desc);
-    }
-
-    @Override
-    protected ModifyableTableDescriptor setFamily(HColumnDescriptor family) {
-      throw new UnsupportedOperationException("HTableDescriptor is read-only");
-    }
-
-    @Override
-    public HColumnDescriptor removeFamily(final byte[] column) {
-      throw new UnsupportedOperationException("HTableDescriptor is read-only");
-    }
-
-    @Override
-    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
-      throw new UnsupportedOperationException("HTableDescriptor is read-only");
-    }
-
-    @Override
-    public void remove(Bytes key) {
-      throw new UnsupportedOperationException("HTableDescriptor is read-only");
-    }
-
-    @Override
-    public ModifyableTableDescriptor setConfiguration(String key, String value) {
-      throw new UnsupportedOperationException("HTableDescriptor is read-only");
-    }
-
-    @Override
-    public void removeConfiguration(final String key) {
-      throw new UnsupportedOperationException("HTableDescriptor is read-only");
-    }
+  @Override
+  protected ModifyableTableDescriptor getDelegateeForModification() {
+    throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index 6f7e20f1046..fb782569cff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -18,11 +18,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -36,15 +38,34 @@ import org.apache.hadoop.hbase.util.Bytes;
 @InterfaceAudience.Public
 public interface TableDescriptor {
 
-  /**
-   * Returns an array all the {@link HColumnDescriptor} of the column families
-   * of the table.
-   *
-   * @return Array of all the HColumnDescriptors of the current table
-   *
-   * @see #getFamilies()
-   */
-  HColumnDescriptor[] getColumnFamilies();
+  @InterfaceAudience.Private
+  static final Comparator<TableDescriptor> COMPARATOR
+    = (TableDescriptor lhs, TableDescriptor rhs) -> {
+      int result = lhs.getTableName().compareTo(rhs.getTableName());
+      if (result != 0) {
+        return result;
+      }
+      Collection<ColumnFamilyDescriptor> lhsFamilies = Arrays.asList(lhs.getColumnFamilies());
+      Collection<ColumnFamilyDescriptor> rhsFamilies = Arrays.asList(rhs.getColumnFamilies());
+      result = Integer.compare(lhsFamilies.size(), rhsFamilies.size());
+      if (result != 0) {
+        return result;
+      }
+
+      for (Iterator<ColumnFamilyDescriptor> it = lhsFamilies.iterator(),
+        it2 = rhsFamilies.iterator(); it.hasNext();) {
+        result = ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next());
+        if (result != 0) {
+          return result;
+        }
+      }
+      // punt on comparison for ordering, just calculate difference
+      result = Integer.compare(lhs.getValues().hashCode(), rhs.getValues().hashCode());
+      if (result != 0) {
+        return result;
+      }
+      return Integer.compare(lhs.getConfiguration().hashCode(), rhs.getConfiguration().hashCode());
+    };
 
   /**
    * Returns the count of the column families of the table.
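The lambda-based COMPARATOR above orders descriptors by table name first, then by column-family count, then family by family via ColumnFamilyDescriptor.COMPARATOR, and only then falls back to the hash codes of the values and configuration maps as an arbitrary but stable tie-break. A minimal sketch of the resulting ordering, using only entry points visible in this patch (the table and family names are illustrative, not part of the change):

```java
import java.util.Arrays;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TableDescriptorOrderingSketch {
  public static void main(String[] args) {
    TableDescriptor t2 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t2"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build())
        .build();
    TableDescriptor t1 = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf")).build())
        .build();

    TableDescriptor[] descriptors = {t2, t1};
    // The names differ, so the comparator decides on the first step of its chain and
    // never consults the family list or the hash-code tie-breaks.
    Arrays.sort(descriptors, TableDescriptor.COMPARATOR);
    System.out.println(descriptors[0].getTableName()); // prints "t1"
  }
}
```

Because the final steps compare hash codes, the ordering is consistent within a run but carries no semantic meaning beyond distinguishing unequal descriptors.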
@@ -85,33 +106,33 @@ public interface TableDescriptor {
   Durability getDurability();
 
   /**
-   * Returns an unmodifiable collection of all the {@link HColumnDescriptor} of
+   * Returns an array of all the {@link ColumnFamilyDescriptor}s of
    * all the column families of the table.
    *
-   * @return Immutable collection of {@link HColumnDescriptor} of all the column
+   * @return An array of {@link ColumnFamilyDescriptor} of all the column
    *         families.
    */
-  Collection<HColumnDescriptor> getFamilies();
+  ColumnFamilyDescriptor[] getColumnFamilies();
 
   /**
    * Returns all the column family names of the current table. The map of
-   * TableDescriptor contains mapping of family name to HColumnDescriptors.
+   * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor.
    * This returns all the keys of the family map which represents the column
    * family names of the table.
    *
    * @return Immutable sorted set of the keys of the families.
    */
-  Set<byte[]> getFamiliesKeys();
+  Set<byte[]> getColumnFamilyNames();
 
   /**
-   * Returns the HColumnDescriptor for a specific column family with name as
+   * Returns the ColumnFamilyDescriptor for a specific column family with name as
    * specified by the parameter column.
    *
-   * @param column Column family name
+   * @param name Column family name
    * @return Column descriptor for the passed family name or the family on
    *         passed in column.
    */
-  HColumnDescriptor getFamily(final byte[] column);
+  ColumnFamilyDescriptor getColumnFamily(final byte[] name);
 
   /**
    * This gets the class associated with the flush policy which determines the
@@ -168,10 +189,18 @@ public interface TableDescriptor {
   String getOwnerString();
 
   /**
-   * Getter for accessing the metadata associated with the key
+   * Getter for accessing the metadata associated with the key.
    *
    * @param key The key.
-   * @return The value.
+   * @return A cloned value; null if there is no mapping for the key
+   */
+  Bytes getValue(Bytes key);
+
+  /**
+   * Getter for accessing the metadata associated with the key.
+   *
+   * @param key The key.
+   * @return A cloned value; null if there is no mapping for the key
    */
   byte[] getValue(byte[] key);
 
@@ -192,10 +221,10 @@ public interface TableDescriptor {
   /**
    * Checks to see if this table contains the given column family
    *
-   * @param familyName Family name or column name.
+   * @param name Family name or column name.
    * @return true if the table contains the specified family name
    */
-  boolean hasFamily(final byte[] familyName);
+  boolean hasColumnFamily(final byte[] name);
 
   /**
    * @return true if the read-replicas memstore replication is enabled.
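Taken together, these hunks rename the entire family-access surface of the interface: getFamilies() becomes getColumnFamilies() and now returns an array, getFamiliesKeys() becomes getColumnFamilyNames(), getFamily(byte[]) becomes getColumnFamily(byte[]), and hasFamily(byte[]) becomes hasColumnFamily(byte[]). A short sketch of a call site written against the renamed interface, using only methods visible in this patch (the "users"/"info" names are made up for illustration):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadDescriptorSketch {
  public static void main(String[] args) {
    byte[] info = Bytes.toBytes("info");
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("users"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(info).setMaxVersions(3).build())
        .build();

    // hasFamily(byte[]) is now hasColumnFamily(byte[]).
    if (htd.hasColumnFamily(info)) {
      // getFamily(byte[]) is now getColumnFamily(byte[]) and returns the interface type.
      ColumnFamilyDescriptor cf = htd.getColumnFamily(info);
      System.out.println(cf.getNameAsString() + " keeps " + cf.getMaxVersions() + " versions");
    }
    // getFamilies() is now getColumnFamilies() and returns an array instead of a
    // collection; getFamiliesKeys() is now getColumnFamilyNames().
    for (ColumnFamilyDescriptor cf : htd.getColumnFamilies()) {
      System.out.println(cf.getNameAsString());
    }
  }
}
```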
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index 6c0fa65f9e1..7a90a71e71f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -32,13 +32,12 @@ import java.util.TreeMap; import java.util.TreeSet; import java.util.function.Function; import java.util.regex.Matcher; +import java.util.stream.Stream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Coprocessor; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -49,12 +48,10 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Public public class TableDescriptorBuilder { - - private static final Log LOG = LogFactory.getLog(TableDescriptorBuilder.class); - + public static final Log LOG = LogFactory.getLog(TableDescriptorBuilder.class); @InterfaceAudience.Private public static final String SPLIT_POLICY = "SPLIT_POLICY"; - + private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY)); /** * Used by HBase Shell interface to access this metadata * attribute which denotes the maximum size of the store file after which a @@ -101,7 +98,7 @@ public class TableDescriptorBuilder { @InterfaceAudience.Private public static final String FLUSH_POLICY = "FLUSH_POLICY"; - + private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY)); /** * Used by rest interface to access this metadata attribute * which denotes if it is a catalog table, either hbase:meta . @@ -162,17 +159,6 @@ public class TableDescriptorBuilder { */ private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS; - /* - * The below are ugly but better than creating them each time till we - * replace booleans being saved as Strings with plain booleans. Need a - * migration script to do this. TODO. - */ - private static final Bytes FALSE - = new Bytes(Bytes.toBytes(Boolean.FALSE.toString())); - - private static final Bytes TRUE - = new Bytes(Bytes.toBytes(Boolean.TRUE.toString())); - /** * Constant that denotes whether the table is READONLY by default and is false */ @@ -228,7 +214,7 @@ public class TableDescriptorBuilder { */ public static final TableDescriptor NAMESPACE_TABLEDESC = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME) - .addFamily(new HColumnDescriptor(NAMESPACE_FAMILY_INFO) + .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES) // Ten is arbitrary number. Keep versions to help debugging. 
.setMaxVersions(10) .setInMemory(true) @@ -236,8 +222,9 @@ public class TableDescriptorBuilder { .setScope(HConstants.REPLICATION_SCOPE_LOCAL) // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true)) - .doBuild(); + .setCacheDataInL1(true) + .build()) + .build(); private final ModifyableTableDescriptor desc; /** @@ -248,10 +235,6 @@ public class TableDescriptorBuilder { if (desc instanceof ModifyableTableDescriptor) { return ((ModifyableTableDescriptor) desc).toByteArray(); } - // TODO: remove this if the HTableDescriptor is removed - if (desc instanceof HTableDescriptor) { - return ((HTableDescriptor) desc).toByteArray(); - } return new ModifyableTableDescriptor(desc).toByteArray(); } @@ -261,14 +244,18 @@ public class TableDescriptorBuilder { * @return This instance serialized with pb with pb magic prefix * @throws org.apache.hadoop.hbase.exceptions.DeserializationException */ - public static TableDescriptorBuilder newBuilder(byte[] pbBytes) throws DeserializationException { - return new TableDescriptorBuilder(ModifyableTableDescriptor.parseFrom(pbBytes)); + public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException { + return ModifyableTableDescriptor.parseFrom(pbBytes); } public static TableDescriptorBuilder newBuilder(final TableName name) { return new TableDescriptorBuilder(name); } + public static TableDescriptor copy(TableDescriptor desc) throws DeserializationException { + return new ModifyableTableDescriptor(desc); + } + /** * Copy all configuration, values, families, and name from the input. * @param desc The desciptor to copy @@ -301,23 +288,23 @@ public class TableDescriptorBuilder { return this; } - public TableDescriptorBuilder addFamily(final HColumnDescriptor family) { - desc.addFamily(family); + public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) { + desc.addColumnFamily(family); return this; } - public TableDescriptorBuilder modifyFamily(final HColumnDescriptor family) { - desc.modifyFamily(family); + public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) { + desc.modifyColumnFamily(family); return this; } - public TableDescriptorBuilder remove(Bytes key) { - desc.remove(key); + public TableDescriptorBuilder removeValue(Bytes key) { + desc.removeValue(key); return this; } - public TableDescriptorBuilder remove(byte[] key) { - desc.remove(key); + public TableDescriptorBuilder removeValue(byte[] key) { + desc.removeValue(key); return this; } @@ -326,8 +313,8 @@ public class TableDescriptorBuilder { return this; } - public TableDescriptorBuilder removeFamily(final byte[] column) { - desc.removeFamily(column); + public TableDescriptorBuilder removeColumnFamily(final byte[] name) { + desc.removeColumnFamily(name); return this; } @@ -418,13 +405,7 @@ public class TableDescriptorBuilder { return this; } - // TODO: replaced the HTableDescriptor by TableDescriptor - public HTableDescriptor build() { - return new HTableDescriptor(desc); - } - - // TODO: remove this in HBase 3.0.0. 
-  private TableDescriptor doBuild() {
+  public TableDescriptor build() {
     return new ModifyableTableDescriptor(desc);
   }
 
@@ -452,55 +433,49 @@ public class TableDescriptorBuilder {
     private final Map<String, String> configuration = new HashMap<>();
 
     /**
-     * Maps column family name to the respective HColumnDescriptors
+     * Maps column family name to the respective ColumnFamilyDescriptors
      */
-    private final Map<byte[], HColumnDescriptor> families
+    private final Map<byte[], ColumnFamilyDescriptor> families
       = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
 
     /**
      * Construct a table descriptor specifying a TableName object
      *
      * @param name Table name.
-     * @see
-     * <a href="https://issues.apache.org/jira/browse/HADOOP-1581">HADOOP-1581
-     * HBASE: (HBASE-174) Un-openable tablename bug</a>
+     * TODO: make this private after removing the HTableDescriptor
      */
-    private ModifyableTableDescriptor(final TableName name) {
+    @InterfaceAudience.Private
+    public ModifyableTableDescriptor(final TableName name) {
       this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
     }
 
+    private ModifyableTableDescriptor(final TableDescriptor desc) {
+      this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues(), desc.getConfiguration());
+    }
+
     /**
      * Construct a table descriptor by cloning the descriptor passed as a
      * parameter.
      *
     * <p>
* Makes a deep copy of the supplied descriptor. - * TODO: make this private after removing the HTableDescriptor + * @param name The new name * @param desc The descriptor. + * TODO: make this private after removing the HTableDescriptor */ @InterfaceAudience.Private - protected ModifyableTableDescriptor(final TableDescriptor desc) { - this(desc.getTableName(), desc.getFamilies(), desc.getValues(), desc.getConfiguration()); + @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed + public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) { + this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues(), desc.getConfiguration()); } - // TODO: make this private after removing the HTableDescriptor - @InterfaceAudience.Private - public ModifyableTableDescriptor(final TableName name, final Collection families, + private ModifyableTableDescriptor(final TableName name, final Collection families, Map values, Map configuration) { this.name = name; - families.forEach(c -> this.families.put(c.getName(), new HColumnDescriptor(c))); - values.forEach(this.values::put); - configuration.forEach(this.configuration::put); - setMetaFlags(name); - } - - /* - * Set meta flags on this table. - * IS_META_KEY is set if its a hbase:meta table - * Called by constructors. - * @param name - */ - private void setMetaFlags(final TableName name) { - values.put(IS_META_KEY, name.equals(TableName.META_TABLE_NAME) ? TRUE : FALSE); + families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); + this.values.putAll(values); + this.configuration.putAll(configuration); + this.values.put(IS_META_KEY, + new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME))))); } /** @@ -510,16 +485,7 @@ public class TableDescriptorBuilder { */ @Override public boolean isMetaRegion() { - return isSomething(IS_META_KEY, false); - } - - private boolean isSomething(final Bytes key, - final boolean valueIfNull) { - byte[] value = getValue(key); - if (value != null) { - return Boolean.valueOf(Bytes.toString(value)); - } - return valueIfNull; + return getOrDefault(IS_META_KEY, Boolean::valueOf, false); } /** @@ -532,39 +498,24 @@ public class TableDescriptorBuilder { return isMetaRegion(); } - /** - * Getter for accessing the metadata associated with the key - * - * @param key The key. - * @return The value. - * @see #values - */ + @Override + public Bytes getValue(Bytes key) { + return values.get(key); + } + @Override public byte[] getValue(byte[] key) { - return getValue(new Bytes(key)); + Bytes value = values.get(new Bytes(key)); + return value == null ? null : value.get(); } - private byte[] getValue(final Bytes key) { - Bytes ibw = values.get(key); - if (ibw == null) { - return null; - } - return ibw.get(); - } - - /** - * Getter for accessing the metadata associated with the key - * - * @param key The key. - * @return The value. 
- * @see #values - */ - public String getValue(String key) { - byte[] value = getValue(Bytes.toBytes(key)); + private T getOrDefault(Bytes key, Function function, T defaultValue) { + Bytes value = values.get(key); if (value == null) { - return null; + return defaultValue; + } else { + return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength())); } - return Bytes.toString(value); } /** @@ -609,26 +560,13 @@ public class TableDescriptorBuilder { */ public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) { if (value == null) { - remove(key); + values.remove(key); } else { values.put(key, value); } return this; } - /** - * Setter for storing metadata as a (key, value) pair in {@link #values} map - * - * @param key The key. - * @param value The value. If null, removes the setting. - * @return the modifyable TD - * @see #values - */ - public ModifyableTableDescriptor setValue(String key, String value) { - return setValue(toBytesOrNull(key, Bytes::toBytes), - toBytesOrNull(value, Bytes::toBytes)); - } - private static Bytes toBytesOrNull(T t, Function f) { if (t == null) { return null; @@ -642,9 +580,10 @@ public class TableDescriptorBuilder { * * @param key Key whose key and value we're to remove from TableDescriptor * parameters. + * @return the modifyable TD */ - public void remove(final String key) { - remove(new Bytes(Bytes.toBytes(key))); + public ModifyableTableDescriptor removeValue(Bytes key) { + return setValue(key, (Bytes) null); } /** @@ -652,19 +591,10 @@ public class TableDescriptorBuilder { * * @param key Key whose key and value we're to remove from TableDescriptor * parameters. + * @return the modifyable TD */ - public void remove(Bytes key) { - values.remove(key); - } - - /** - * Remove metadata represented by the key from the {@link #values} map - * - * @param key Key whose key and value we're to remove from TableDescriptor - * parameters. - */ - public void remove(final byte[] key) { - remove(new Bytes(key)); + public ModifyableTableDescriptor removeValue(final byte[] key) { + return removeValue(new Bytes(key)); } /** @@ -676,7 +606,7 @@ public class TableDescriptorBuilder { */ @Override public boolean isReadOnly() { - return isSomething(READONLY_KEY, DEFAULT_READONLY); + return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY); } /** @@ -690,7 +620,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setReadOnly(final boolean readOnly) { - return setValue(READONLY_KEY, readOnly ? TRUE : FALSE); + return setValue(READONLY_KEY, Boolean.toString(readOnly)); } /** @@ -701,7 +631,7 @@ public class TableDescriptorBuilder { */ @Override public boolean isCompactionEnabled() { - return isSomething(COMPACTION_ENABLED_KEY, DEFAULT_COMPACTION_ENABLED); + return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED); } /** @@ -711,7 +641,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) { - return setValue(COMPACTION_ENABLED_KEY, isEnable ? 
TRUE : FALSE); + return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable)); } /** @@ -722,7 +652,7 @@ public class TableDescriptorBuilder { */ @Override public boolean isNormalizationEnabled() { - return isSomething(NORMALIZATION_ENABLED_KEY, DEFAULT_NORMALIZATION_ENABLED); + return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED); } /** @@ -732,7 +662,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) { - return setValue(NORMALIZATION_ENABLED_KEY, isEnable ? TRUE : FALSE); + return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable)); } /** @@ -753,18 +683,7 @@ public class TableDescriptorBuilder { */ @Override public Durability getDurability() { - byte[] durabilityValue = getValue(DURABILITY_KEY); - if (durabilityValue == null) { - return DEFAULT_DURABLITY; - } else { - try { - return Durability.valueOf(Bytes.toString(durabilityValue)); - } catch (IllegalArgumentException ex) { - LOG.warn("Received " + ex + " because Durability value for TableDescriptor" - + " is not known. Durability:" + Bytes.toString(durabilityValue)); - return DEFAULT_DURABLITY; - } - } + return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY); } /** @@ -786,7 +705,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) { - return setValue(SPLIT_POLICY, clazz); + return setValue(SPLIT_POLICY_KEY, clazz); } /** @@ -799,7 +718,7 @@ public class TableDescriptorBuilder { */ @Override public String getRegionSplitPolicyClassName() { - return getValue(SPLIT_POLICY); + return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null); } /** @@ -813,11 +732,7 @@ public class TableDescriptorBuilder { */ @Override public long getMaxFileSize() { - byte[] value = getValue(MAX_FILESIZE_KEY); - if (value != null) { - return Long.parseLong(Bytes.toString(value)); - } - return -1; + return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1); } /** @@ -850,11 +765,7 @@ public class TableDescriptorBuilder { */ @Override public long getMemStoreFlushSize() { - byte[] value = getValue(MEMSTORE_FLUSHSIZE_KEY); - if (value != null) { - return Long.parseLong(Bytes.toString(value)); - } - return -1; + return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1); } /** @@ -879,7 +790,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) { - return setValue(FLUSH_POLICY, clazz); + return setValue(FLUSH_POLICY_KEY, clazz); } /** @@ -892,46 +803,45 @@ public class TableDescriptorBuilder { */ @Override public String getFlushPolicyClassName() { - return getValue(FLUSH_POLICY); + return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null); } /** * Adds a column family. For the updating purpose please use - * {@link #modifyFamily(HColumnDescriptor)} instead. + * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. * - * @param family HColumnDescriptor of family to add. + * @param family to add. 
* @return the modifyable TD */ - public ModifyableTableDescriptor addFamily(final HColumnDescriptor family) { + public ModifyableTableDescriptor addColumnFamily(final ColumnFamilyDescriptor family) { if (family.getName() == null || family.getName().length <= 0) { throw new IllegalArgumentException("Family name cannot be null or empty"); } - if (hasFamily(family.getName())) { + if (hasColumnFamily(family.getName())) { throw new IllegalArgumentException("Family '" + family.getNameAsString() + "' already exists so cannot be added"); } - return setFamily(family); + return putColumnFamily(family); } /** * Modifies the existing column family. * - * @param family HColumnDescriptor of family to update + * @param family to update * @return this (for chained invocation) */ - public ModifyableTableDescriptor modifyFamily(final HColumnDescriptor family) { + public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) { if (family.getName() == null || family.getName().length <= 0) { throw new IllegalArgumentException("Family name cannot be null or empty"); } - if (!hasFamily(family.getName())) { + if (!hasColumnFamily(family.getName())) { throw new IllegalArgumentException("Column family '" + family.getNameAsString() + "' does not exist"); } - return setFamily(family); + return putColumnFamily(family); } - // TODO: make this private after removing the UnmodifyableTableDescriptor - protected ModifyableTableDescriptor setFamily(HColumnDescriptor family) { + private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) { families.put(family.getName(), family); return this; } @@ -943,7 +853,7 @@ public class TableDescriptorBuilder { * @return true if the table contains the specified family name */ @Override - public boolean hasFamily(final byte[] familyName) { + public boolean hasColumnFamily(final byte[] familyName) { return families.containsKey(familyName); } @@ -1085,6 +995,7 @@ public class TableDescriptorBuilder { * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor, * if yes then the contents of the descriptors are compared. 
* + * @param obj The object to compare * @return true if the contents of the the two descriptors exactly match * * @see java.lang.Object#equals(java.lang.Object) @@ -1104,13 +1015,13 @@ public class TableDescriptorBuilder { } /** - * @see java.lang.Object#hashCode() + * @return hash code */ @Override public int hashCode() { int result = this.name.hashCode(); if (this.families.size() > 0) { - for (HColumnDescriptor e : this.families.values()) { + for (ColumnFamilyDescriptor e : this.families.values()) { result ^= e.hashCode(); } } @@ -1131,52 +1042,12 @@ public class TableDescriptorBuilder { */ @Override public int compareTo(final ModifyableTableDescriptor other) { - int result = this.name.compareTo(other.name); - if (result == 0) { - result = families.size() - other.families.size(); - } - if (result == 0 && families.size() != other.families.size()) { - result = Integer.valueOf(families.size()).compareTo(other.families.size()); - } - if (result == 0) { - for (Iterator it = families.values().iterator(), - it2 = other.families.values().iterator(); it.hasNext();) { - result = it.next().compareTo(it2.next()); - if (result != 0) { - break; - } - } - } - if (result == 0) { - // punt on comparison for ordering, just calculate difference - result = this.values.hashCode() - other.values.hashCode(); - if (result < 0) { - result = -1; - } else if (result > 0) { - result = 1; - } - } - if (result == 0) { - result = this.configuration.hashCode() - other.configuration.hashCode(); - if (result < 0) { - result = -1; - } else if (result > 0) { - result = 1; - } - } - return result; + return TableDescriptor.COMPARATOR.compare(this, other); } - /** - * Returns an unmodifiable collection of all the {@link HColumnDescriptor} - * of all the column families of the table. - * - * @return Immutable collection of {@link HColumnDescriptor} of all the - * column families. 
- */ @Override - public Collection getFamilies() { - return Collections.unmodifiableCollection(this.families.values()); + public ColumnFamilyDescriptor[] getColumnFamilies() { + return families.values().toArray(new ColumnFamilyDescriptor[families.size()]); } /** @@ -1185,8 +1056,7 @@ public class TableDescriptorBuilder { */ @Override public boolean hasSerialReplicationScope() { - return getFamilies() - .stream() + return Stream.of(getColumnFamilies()) .anyMatch(column -> column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL); } @@ -1195,15 +1065,7 @@ public class TableDescriptorBuilder { */ @Override public int getRegionReplication() { - return getIntValue(REGION_REPLICATION_KEY, DEFAULT_REGION_REPLICATION); - } - - private int getIntValue(Bytes key, int defaultVal) { - byte[] val = getValue(key); - if (val == null || val.length == 0) { - return defaultVal; - } - return Integer.parseInt(Bytes.toString(val)); + return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION); } /** @@ -1213,8 +1075,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setRegionReplication(int regionReplication) { - return setValue(REGION_REPLICATION_KEY, - new Bytes(Bytes.toBytes(Integer.toString(regionReplication)))); + return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication)); } /** @@ -1222,7 +1083,7 @@ public class TableDescriptorBuilder { */ @Override public boolean hasRegionMemstoreReplication() { - return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION); + return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION); } /** @@ -1236,7 +1097,7 @@ public class TableDescriptorBuilder { * @return the modifyable TD */ public ModifyableTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) { - setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE); + setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication)); // If the memstore replication is setup, we do not have to wait for observing a flush event // from primary before starting to serve reads, because gaps from replication is not applicable return setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, @@ -1249,48 +1110,24 @@ public class TableDescriptorBuilder { @Override public int getPriority() { - return getIntValue(PRIORITY_KEY, DEFAULT_PRIORITY); + return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY); } /** * Returns all the column family names of the current table. The map of - * TableDescriptor contains mapping of family name to HColumnDescriptors. + * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor. * This returns all the keys of the family map which represents the column * family names of the table. * * @return Immutable sorted set of the keys of the families. */ @Override - public Set getFamiliesKeys() { + public Set getColumnFamilyNames() { return Collections.unmodifiableSet(this.families.keySet()); } /** - * Returns the count of the column families of the table. - * - * @return Count of column families of the table - */ - @Override - public int getColumnFamilyCount() { - return families.size(); - } - - /** - * Returns an array all the {@link HColumnDescriptor} of the column families - * of the table. 
- * - * @return Array of all the HColumnDescriptors of the current table - * - * @see #getFamilies() - */ - @Override - public HColumnDescriptor[] getColumnFamilies() { - Collection hColumnDescriptors = getFamilies(); - return hColumnDescriptors.toArray(new HColumnDescriptor[hColumnDescriptors.size()]); - } - - /** - * Returns the HColumnDescriptor for a specific column family with name as + * Returns the ColumnFamilyDescriptor for a specific column family with name as * specified by the parameter column. * * @param column Column family name @@ -1298,19 +1135,19 @@ public class TableDescriptorBuilder { * passed in column. */ @Override - public HColumnDescriptor getFamily(final byte[] column) { + public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { return this.families.get(column); } /** - * Removes the HColumnDescriptor with name specified by the parameter column + * Removes the ColumnFamilyDescriptor with name specified by the parameter column * from the table descriptor * * @param column Name of the column family to be removed. * @return Column descriptor for the passed family name or the family on * passed in column. */ - public HColumnDescriptor removeFamily(final byte[] column) { + public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { return this.families.remove(column); } @@ -1523,7 +1360,7 @@ public class TableDescriptorBuilder { } // if we found a match, remove it if (match != null) { - remove(match); + ModifyableTableDescriptor.this.removeValue(match); } } @@ -1535,27 +1372,22 @@ public class TableDescriptorBuilder { // used by admin.rb:alter(table_name,*args) to update owner. @Deprecated public ModifyableTableDescriptor setOwnerString(String ownerString) { - if (ownerString != null) { - setValue(OWNER_KEY, ownerString); - } else { - remove(OWNER_KEY); - } - return this; + return setValue(OWNER_KEY, ownerString); } @Override @Deprecated public String getOwnerString() { - if (getValue(OWNER_KEY) != null) { - return Bytes.toString(getValue(OWNER_KEY)); - } // Note that every table should have an owner (i.e. should have OWNER_KEY set). // hbase:meta should return system user as owner, not null (see // MasterFileSystem.java:bootstrap()). - return null; + return getOrDefault(OWNER_KEY, Function.identity(), null); } - public byte[] toByteArray() { + /** + * @return the bytes in pb format + */ + private byte[] toByteArray() { return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray()); } @@ -1567,7 +1399,7 @@ public class TableDescriptorBuilder { * @throws DeserializationException * @see #toByteArray() */ - public static TableDescriptor parseFrom(final byte[] bytes) + private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor"); @@ -1609,7 +1441,7 @@ public class TableDescriptorBuilder { */ public ModifyableTableDescriptor setConfiguration(String key, String value) { if (value == null) { - removeConfiguration(key); + configuration.remove(key); } else { configuration.put(key, value); } @@ -1620,9 +1452,15 @@ public class TableDescriptorBuilder { * Remove a config setting represented by the key from the * {@link #configuration} map * @param key Config key. 
+   * @return the modifyable TD
    */
-  public void removeConfiguration(final String key) {
-    configuration.remove(key);
+  public ModifyableTableDescriptor removeConfiguration(final String key) {
+    return setConfiguration(key, null);
+  }
+
+  @Override
+  public int getColumnFamilyCount() {
+    return families.size();
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 5c4dd550219..b1969115a4b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -63,6 +63,8 @@ import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ClientUtil;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.CompactionState;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Cursor;
@@ -2924,7 +2926,7 @@ public final class ProtobufUtil {
    * @param hcd the HColummnDescriptor
    * @return Convert this instance to a the pb column family type
    */
-  public static ColumnFamilySchema convertToColumnFamilySchema(HColumnDescriptor hcd) {
+  public static ColumnFamilySchema convertToColumnFamilySchema(ColumnFamilyDescriptor hcd) {
     ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
     builder.setName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
     for (Map.Entry<Bytes, Bytes> e : hcd.getValues().entrySet()) {
@@ -2947,6 +2949,7 @@ public final class ProtobufUtil {
    * @param cfs the ColumnFamilySchema
    * @return An {@link HColumnDescriptor} made from the passed in cfs
    */
+  @Deprecated
   public static HColumnDescriptor convertToHColumnDesc(final ColumnFamilySchema cfs) {
     // Use the empty constructor so we preserve the initial values set on construction for things
     // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
     // unrelated-looking test failures that are hard to trace back to here.
@@ -2961,6 +2964,22 @@ public final class ProtobufUtil {
     return hcd;
   }
 
+  /**
+   * Converts a ColumnFamilySchema to ColumnFamilyDescriptor
+   * @param cfs the ColumnFamilySchema
+   * @return A {@link ColumnFamilyDescriptor} made from the passed in cfs
+   */
+  public static ColumnFamilyDescriptor convertToColumnDesc(final ColumnFamilySchema cfs) {
+    // Use the empty constructor so we preserve the initial values set on construction for things
+    // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
+    // unrelated-looking test failures that are hard to trace back to here.
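+    // The schema's name seeds the builder; its attribute and configuration lists are then
+    // replayed onto it, and build() returns an immutable ColumnFamilyDescriptor.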
+ ColumnFamilyDescriptorBuilder builder + = ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); + cfs.getAttributesList().forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + cfs.getConfigurationList().forEach(a -> builder.setConfiguration(a.getName(), a.getValue())); + return builder.build(); + } + /** * Converts an HTableDescriptor to TableSchema * @param htd the HTableDescriptor @@ -2975,7 +2994,7 @@ public final class ProtobufUtil { aBuilder.setSecond(UnsafeByteOperations.unsafeWrap(e.getValue().get())); builder.addAttributes(aBuilder.build()); } - for (HColumnDescriptor hcd : htd.getColumnFamilies()) { + for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) { builder.addColumnFamilies(convertToColumnFamilySchema(hcd)); } for (Map.Entry e : htd.getConfiguration().entrySet()) { @@ -3024,8 +3043,8 @@ public final class ProtobufUtil { = TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); ts.getColumnFamiliesList() .stream() - .map(ProtobufUtil::convertToHColumnDesc) - .forEach(builder::addFamily); + .map(ProtobufUtil::convertToColumnDesc) + .forEach(builder::addColumnFamily); ts.getAttributesList() .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); ts.getConfigurationList() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 134c319831a..67f7d0a5ad6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -25,7 +25,6 @@ import java.util.regex.Pattern; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Action; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -1031,7 +1031,7 @@ public final class RequestConverter { */ public static AddColumnRequest buildAddColumnRequest( final TableName tableName, - final HColumnDescriptor column, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); @@ -1071,7 +1071,7 @@ public final class RequestConverter { */ public static ModifyColumnRequest buildModifyColumnRequest( final TableName tableName, - final HColumnDescriptor column, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java index cabf55745d9..cfbfccb1c28 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java @@ -37,6 
+37,7 @@ import org.junit.experimental.categories.Category; /** Tests the HColumnDescriptor with appropriate arguments */ @Category({MiscTests.class, SmallTests.class}) +@Deprecated public class TestHColumnDescriptor { @Test public void testPb() throws DeserializationException { @@ -71,6 +72,7 @@ public class TestHColumnDescriptor { assertTrue(hcd.equals(deserializedHcd)); assertEquals(v, hcd.getBlocksize()); assertEquals(v, hcd.getTimeToLive()); + assertEquals(v, hcd.getScope()); assertEquals(hcd.getValue("a"), deserializedHcd.getValue("a")); assertEquals(hcd.getMaxVersions(), deserializedHcd.getMaxVersions()); assertEquals(hcd.getMinVersions(), deserializedHcd.getMinVersions()); @@ -92,7 +94,7 @@ public class TestHColumnDescriptor { try { new HColumnDescriptor("".getBytes()); } catch (IllegalArgumentException e) { - assertEquals("Family name can not be empty", e.getLocalizedMessage()); + assertEquals("Column Family name can not be empty", e.getLocalizedMessage()); } } @@ -115,9 +117,11 @@ public class TestHColumnDescriptor { boolean isMob = true; long threshold = 1000; String policy = "weekly"; - String isMobString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(isMob)), + // We unify the format of all values saved in the descriptor. + // Each value is stored as bytes of string. + String isMobString = PrettyPrinter.format(String.valueOf(isMob), HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); - String thresholdString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(threshold)), + String thresholdString = PrettyPrinter.format(String.valueOf(threshold), HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index bcff565e14d..9bbdf5053af 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -43,6 +43,7 @@ import org.junit.rules.TestName; * Test setting values in the descriptor */ @Category({MiscTests.class, SmallTests.class}) +@Deprecated public class TestHTableDescriptor { private static final Log LOG = LogFactory.getLog(TestHTableDescriptor.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java new file mode 100644 index 00000000000..5fe329dc6ed --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeepDeletedCells; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.HBaseException; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.BuilderStyleTest; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.PrettyPrinter; +import org.junit.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MiscTests.class, SmallTests.class}) +public class TestColumnFamilyDescriptorBuilder { + @Test + public void testBuilder() throws DeserializationException { + ColumnFamilyDescriptorBuilder builder + = ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) + .setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setBloomFilterType(BloomType.NONE) + .setCacheDataInL1(true); + final int v = 123; + builder.setBlocksize(v); + builder.setTimeToLive(v); + builder.setBlockCacheEnabled(!HColumnDescriptor.DEFAULT_BLOCKCACHE); + builder.setValue(Bytes.toBytes("a"), Bytes.toBytes("b")); + builder.setMaxVersions(v); + assertEquals(v, builder.build().getMaxVersions()); + builder.setMinVersions(v); + assertEquals(v, builder.build().getMinVersions()); + builder.setKeepDeletedCells(KeepDeletedCells.TRUE); + builder.setInMemory(!HColumnDescriptor.DEFAULT_IN_MEMORY); + boolean inmemory = builder.build().isInMemory(); + builder.setScope(v); + builder.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF); + builder.setBloomFilterType(BloomType.ROW); + builder.setCompressionType(Algorithm.SNAPPY); + builder.setMobEnabled(true); + builder.setMobThreshold(1000L); + builder.setDFSReplication((short) v); + + ColumnFamilyDescriptor hcd = builder.build(); + byte [] bytes = ColumnFamilyDescriptorBuilder.toByteArray(hcd); + ColumnFamilyDescriptor deserializedHcd = ColumnFamilyDescriptorBuilder.parseFrom(bytes); + assertTrue(hcd.equals(deserializedHcd)); + assertEquals(v, hcd.getBlocksize()); + assertEquals(v, hcd.getTimeToLive()); + assertTrue(Bytes.equals(hcd.getValue(Bytes.toBytes("a")), deserializedHcd.getValue(Bytes.toBytes("a")))); + assertEquals(hcd.getMaxVersions(), deserializedHcd.getMaxVersions()); + assertEquals(hcd.getMinVersions(), deserializedHcd.getMinVersions()); + assertEquals(hcd.getKeepDeletedCells(), deserializedHcd.getKeepDeletedCells()); + assertEquals(inmemory, deserializedHcd.isInMemory()); + assertEquals(hcd.getScope(), deserializedHcd.getScope()); + 
assertTrue(deserializedHcd.getCompressionType().equals(Compression.Algorithm.SNAPPY)); + assertTrue(deserializedHcd.getDataBlockEncoding().equals(DataBlockEncoding.FAST_DIFF)); + assertTrue(deserializedHcd.getBloomFilterType().equals(BloomType.ROW)); + assertEquals(hcd.isMobEnabled(), deserializedHcd.isMobEnabled()); + assertEquals(hcd.getMobThreshold(), deserializedHcd.getMobThreshold()); + assertEquals(v, deserializedHcd.getDFSReplication()); + } + + @Test + /** Tests HColumnDescriptor with empty familyName*/ + public void testHColumnDescriptorShouldThrowIAEWhenFamiliyNameEmpty() + throws Exception { + try { + ColumnFamilyDescriptorBuilder.newBuilder("".getBytes()).build(); + } catch (IllegalArgumentException e) { + assertEquals("Column Family name can not be empty", e.getLocalizedMessage()); + } + } + + /** + * Test that we add and remove strings from configuration properly. + */ + @Test + public void testAddGetRemoveConfiguration() { + ColumnFamilyDescriptorBuilder builder + = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + String key = "Some"; + String value = "value"; + builder.setConfiguration(key, value); + assertEquals(value, builder.build().getConfigurationValue(key)); + builder.removeConfiguration(key); + assertEquals(null, builder.build().getConfigurationValue(key)); + } + + @Test + public void testMobValuesInHColumnDescriptorShouldReadable() { + boolean isMob = true; + long threshold = 1000; + String policy = "weekly"; + // We unify the format of all values saved in the descriptor. + // Each value is stored as bytes of string. + String isMobString = PrettyPrinter.format(String.valueOf(isMob), + HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); + String thresholdString = PrettyPrinter.format(String.valueOf(threshold), + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); + String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); + assertEquals(String.valueOf(isMob), isMobString); + assertEquals(String.valueOf(threshold), thresholdString); + assertEquals(String.valueOf(policy), policyString); + } + + @Test + public void testClassMethodsAreBuilderStyle() { + /* HColumnDescriptor should have a builder style setup where setXXX/addXXX methods + * can be chainable together: + * . 
For example: + * HColumnDescriptor hcd + * = new HColumnDescriptor() + * .setFoo(foo) + * .setBar(bar) + * .setBuz(buz) + * + * This test ensures that all methods starting with "set" returns the declaring object + */ + + BuilderStyleTest.assertClassesAreBuilderStyle(ColumnFamilyDescriptorBuilder.class); + } + + @Test + public void testSetTimeToLive() throws HBaseException { + String ttl; + ColumnFamilyDescriptorBuilder builder + = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + + ttl = "50000"; + builder.setTimeToLive(ttl); + Assert.assertEquals(50000, builder.build().getTimeToLive()); + + ttl = "50000 seconds"; + builder.setTimeToLive(ttl); + Assert.assertEquals(50000, builder.build().getTimeToLive()); + + ttl = ""; + builder.setTimeToLive(ttl); + Assert.assertEquals(0, builder.build().getTimeToLive()); + + ttl = "FOREVER"; + builder.setTimeToLive(ttl); + Assert.assertEquals(HConstants.FOREVER, builder.build().getTimeToLive()); + + ttl = "1 HOUR 10 minutes 1 second"; + builder.setTimeToLive(ttl); + Assert.assertEquals(4201, builder.build().getTimeToLive()); + + ttl = "500 Days 23 HOURS"; + builder.setTimeToLive(ttl); + Assert.assertEquals(43282800, builder.build().getTimeToLive()); + + ttl = "43282800 SECONDS (500 Days 23 hours)"; + builder.setTimeToLive(ttl); + Assert.assertEquals(43282800, builder.build().getTimeToLive()); + } +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java new file mode 100644 index 00000000000..12f1bec6850 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Consumer;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.BuilderStyleTest;
+import org.apache.hadoop.hbase.util.Bytes;
+import static org.junit.Assert.fail;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({ClientTests.class, SmallTests.class})
+public class TestImmutableHColumnDescriptor {
+  @Rule
+  public TestName name = new TestName();
+  private static final List<Consumer<HColumnDescriptor>> TEST_FUNCTION = Arrays.asList(
+    hcd -> hcd.setValue("a", "a"),
+    hcd -> hcd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")),
+    hcd -> hcd.setConfiguration("aaa", "ccc"),
+    hcd -> hcd.remove(Bytes.toBytes("aaa")),
+    hcd -> hcd.removeConfiguration("xxx"),
+    hcd -> hcd.setBlockCacheEnabled(false),
+    hcd -> hcd.setBlocksize(10),
+    hcd -> hcd.setBloomFilterType(BloomType.NONE),
+    hcd -> hcd.setCacheBloomsOnWrite(false),
+    hcd -> hcd.setCacheDataInL1(true),
+    hcd -> hcd.setCacheDataOnWrite(true),
+    hcd -> hcd.setCacheIndexesOnWrite(true),
+    hcd -> hcd.setCompactionCompressionType(Compression.Algorithm.LZO),
+    hcd -> hcd.setCompressTags(true),
+    hcd -> hcd.setCompressionType(Compression.Algorithm.LZO),
+    hcd -> hcd.setDFSReplication((short) 10),
+    hcd -> hcd.setDataBlockEncoding(DataBlockEncoding.NONE),
+    hcd -> hcd.setEncryptionKey(Bytes.toBytes("xxx")),
+    hcd -> hcd.setEncryptionType("xxx"),
+    hcd -> hcd.setEvictBlocksOnClose(true),
+    hcd -> hcd.setInMemory(true),
+    hcd -> hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE),
+    hcd -> hcd.setKeepDeletedCells(KeepDeletedCells.FALSE),
+    hcd -> hcd.setMaxVersions(1000),
+    hcd -> hcd.setMinVersions(10),
+    hcd -> hcd.setMobCompactPartitionPolicy(MobCompactPartitionPolicy.DAILY),
+    hcd -> hcd.setMobEnabled(true),
+    hcd -> hcd.setMobThreshold(10),
+    hcd -> hcd.setPrefetchBlocksOnOpen(true),
+    hcd -> hcd.setScope(0),
+    hcd -> hcd.setStoragePolicy("aaa"),
+    hcd -> hcd.setTimeToLive(100),
+    hcd -> hcd.setVersions(1, 10)
+  );
+
+  @Test
+  public void testImmutable() {
+    ImmutableHColumnDescriptor hcd = new ImmutableHColumnDescriptor(
+      new HColumnDescriptor(Bytes.toBytes(name.getMethodName())));
+    for (int i = 0; i != TEST_FUNCTION.size(); ++i) {
+      try {
+        TEST_FUNCTION.get(i).accept(hcd);
+        fail("ImmutableHColumnDescriptor can't be modified!!! 
The index of method is " + i); + } catch (UnsupportedOperationException e) { + } + } + } + + @Test + public void testClassMethodsAreBuilderStyle() { + BuilderStyleTest.assertClassesAreBuilderStyle(ImmutableHColumnDescriptor.class); + } +} \ No newline at end of file diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java index 91ef72a0432..6c20bc8f6ce 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java @@ -72,17 +72,38 @@ public class TestImmutableHTableDescriptor { @Test public void testImmutable() { - ImmutableHTableDescriptor htd = new ImmutableHTableDescriptor( - new HTableDescriptor(TableName.valueOf(name.getMethodName()))); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); + ImmutableHTableDescriptor immutableHtd = new ImmutableHTableDescriptor(htd); TEST_FUNCTION.forEach(f -> { try { - f.accept(htd); + f.accept(immutableHtd); fail("ImmutableHTableDescriptor can't be modified!!!"); } catch (UnsupportedOperationException e) { } }); } + @Test + public void testImmutableHColumnDescriptor() { + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); + htd.addFamily(new HColumnDescriptor(Bytes.toBytes("family"))); + ImmutableHTableDescriptor immutableHtd = new ImmutableHTableDescriptor(htd); + for (HColumnDescriptor hcd : immutableHtd.getColumnFamilies()) { + assertReadOnly(hcd); + } + for (HColumnDescriptor hcd : immutableHtd.getFamilies()) { + assertReadOnly(hcd); + } + } + + private void assertReadOnly(HColumnDescriptor hcd) { + try { + hcd.setBlocksize(10); + fail("ImmutableHColumnDescriptor can't be modified!!!"); + } catch (UnsupportedOperationException e) { + } + } + @Test public void testClassMethodsAreBuilderStyle() { /* ImmutableHTableDescriptor should have a builder style setup where setXXX/addXXX methods diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index c4ecacfd097..bc1c19e70fa 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -28,7 +28,6 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -109,7 +108,7 @@ public class TestTableDescriptorBuilder { .build(); byte [] bytes = TableDescriptorBuilder.toByteArray(htd); - TableDescriptor deserializedHtd = TableDescriptorBuilder.newBuilder(bytes).build(); + TableDescriptor deserializedHtd = TableDescriptorBuilder.parseFrom(bytes); assertEquals(htd, deserializedHtd); assertEquals(v, deserializedHtd.getMaxFileSize()); assertTrue(deserializedHtd.isReadOnly()); @@ -195,7 +194,7 @@ public class TestTableDescriptorBuilder { .build(); assertTrue(Bytes.equals(value, desc.getValue(key))); desc = TableDescriptorBuilder.newBuilder(desc) - .remove(key) + .removeValue(key) .build(); 
assertTrue(desc.getValue(key) == null); } @@ -299,24 +298,26 @@ public class TestTableDescriptorBuilder { @Test public void testModifyFamily() { byte[] familyName = Bytes.toBytes("cf"); - HColumnDescriptor hcd = new HColumnDescriptor(familyName); - hcd.setBlocksize(1000); - hcd.setDFSReplication((short) 3); + ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) + .setBlocksize(1000) + .setDFSReplication((short) 3) + .build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .addFamily(hcd) + .addColumnFamily(hcd) .build(); - assertEquals(1000, htd.getFamily(familyName).getBlocksize()); - assertEquals(3, htd.getFamily(familyName).getDFSReplication()); - hcd = new HColumnDescriptor(familyName); - hcd.setBlocksize(2000); - hcd.setDFSReplication((short) 1); + assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); + assertEquals(3, htd.getColumnFamily(familyName).getDFSReplication()); + hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) + .setBlocksize(2000) + .setDFSReplication((short) 1) + .build(); htd = TableDescriptorBuilder.newBuilder(htd) - .modifyFamily(hcd) + .modifyColumnFamily(hcd) .build(); - assertEquals(2000, htd.getFamily(familyName).getBlocksize()); - assertEquals(1, htd.getFamily(familyName).getDFSReplication()); + assertEquals(2000, htd.getColumnFamily(familyName).getBlocksize()); + assertEquals(1, htd.getColumnFamily(familyName).getDFSReplication()); } @Test(expected=IllegalArgumentException.class) @@ -325,23 +326,25 @@ public class TestTableDescriptorBuilder { HColumnDescriptor hcd = new HColumnDescriptor(familyName); TableDescriptor htd = TableDescriptorBuilder .newBuilder(TableName.valueOf(name.getMethodName())) - .modifyFamily(hcd) + .modifyColumnFamily(hcd) .build(); } @Test(expected=IllegalArgumentException.class) public void testAddDuplicateFamilies() { byte[] familyName = Bytes.toBytes("cf"); - HColumnDescriptor hcd = new HColumnDescriptor(familyName); - hcd.setBlocksize(1000); - TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .addFamily(hcd) + ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) + .setBlocksize(1000) + .build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .addColumnFamily(hcd) + .build(); + assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); + hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) + .setBlocksize(2000) .build(); - assertEquals(1000, htd.getFamily(familyName).getBlocksize()); - hcd = new HColumnDescriptor(familyName); - hcd.setBlocksize(2000); // add duplicate column - TableDescriptorBuilder.newBuilder(htd).addFamily(hcd).build(); + TableDescriptorBuilder.newBuilder(htd).addColumnFamily(hcd).build(); } @Test @@ -358,18 +361,18 @@ public class TestTableDescriptorBuilder { hcdWithScope.setScope(HConstants.REPLICATION_SCOPE_SERIAL); HColumnDescriptor hcdWithoutScope = new HColumnDescriptor(Bytes.toBytes("cf1")); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .addFamily(hcdWithoutScope) + .addColumnFamily(hcdWithoutScope) .build(); assertFalse(htd.hasSerialReplicationScope()); htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .addFamily(hcdWithScope) + .addColumnFamily(hcdWithScope) .build(); assertTrue(htd.hasSerialReplicationScope()); htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .addFamily(hcdWithScope) - .addFamily(hcdWithoutScope) + .addColumnFamily(hcdWithScope) + .addColumnFamily(hcdWithoutScope) .build(); assertTrue(htd.hasSerialReplicationScope()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 791445bad37..d5140037a34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -21,14 +21,14 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY; import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; import java.io.IOException; -import java.lang.management.ManagementFactory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; @@ -37,6 +37,7 @@ import org.apache.hadoop.util.StringUtils; import com.google.common.annotations.VisibleForTesting; + /** * Stores all of the cache objects and configuration for a single HFile. */ @@ -232,7 +233,7 @@ public class CacheConfig { * @param conf hbase configuration * @param family column family configuration */ - public CacheConfig(Configuration conf, HColumnDescriptor family) { + public CacheConfig(Configuration conf, ColumnFamilyDescriptor family) { this(CacheConfig.instantiateBlockCache(conf), conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ) && family.isBlockCacheEnabled(), @@ -250,8 +251,8 @@ public class CacheConfig { conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(), - conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1, - HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(), + conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1, + ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(), conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) ); LOG.info("Created cacheConfig for " + family.getNameAsString() + ": " + this); @@ -260,8 +261,8 @@ public class CacheConfig { /** * Create a cache configuration using the specified configuration object and * defaults for family level settings. Only use if no column family context. 
Prefer - * {@link CacheConfig#CacheConfig(Configuration, HColumnDescriptor)} - * @see #CacheConfig(Configuration, HColumnDescriptor) + * {@link CacheConfig#CacheConfig(Configuration, ColumnFamilyDescriptor)} + * @see #CacheConfig(Configuration, ColumnFamilyDescriptor) * @param conf hbase configuration */ public CacheConfig(Configuration conf) { @@ -275,8 +276,8 @@ public class CacheConfig { conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE), conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED), conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN), - conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1, - HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1), + conf.getBoolean(ColumnFamilyDescriptorBuilder.CACHE_DATA_IN_L1, + ColumnFamilyDescriptorBuilder.DEFAULT_CACHE_DATA_IN_L1), conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT) ); LOG.info("Created cacheConfig: " + this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index c43a4d15d83..c6397f36b01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.VersionInfoUtil; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 2e2aa5df01d..c423f173c45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.assignment; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import java.io.IOException; import java.io.InputStream; @@ -521,7 +522,7 @@ public class SplitTableRegionProcedure // Split each store file. 
final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); for (String family: regionFs.getFamilies()) { - final HColumnDescriptor hcd = htd.getFamily(family.getBytes()); + final ColumnFamilyDescriptor hcd = htd.getColumnFamily(family.getBytes()); final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family); if (storeFiles != null && storeFiles.size() > 0) { final CacheConfig cacheConf = new CacheConfig(conf, hcd); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 70fe5c5e133..dcd3144d2ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -130,7 +130,7 @@ public class FSTableDescriptors implements TableDescriptors { public static HTableDescriptor createMetaTableDescriptor(final Configuration conf) throws IOException { return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY) + .addColumnFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) @@ -142,7 +142,7 @@ public class FSTableDescriptors implements TableDescriptors { // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true)) - .addFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY) + .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) @@ -154,7 +154,7 @@ public class FSTableDescriptors implements TableDescriptors { // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true)) - .addFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY) + .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) @@ -166,7 +166,7 @@ public class FSTableDescriptors implements TableDescriptors { // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true)) - .addFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY) + .addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY) .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) .setInMemory(true) @@ -178,7 +178,7 @@ public class FSTableDescriptors implements TableDescriptors { // Enable cache of data blocks in L1 if more than one caching tier deployed: // e.g. if using CombinedBlockCache (BucketCache). .setCacheDataInL1(true)) - .addFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY) + .addColumnFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY) // Ten is arbitrary number. Keep versions to help debugging.
.setMaxVersions(10) .setInMemory(true) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java index 15250ac35b0..cc7d0a3648a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java @@ -101,7 +101,7 @@ public class TestAcidGuarantees implements Tool { } if (useMob) { - HTableDescriptor htd = util.getAdmin().getTableDescriptor(TABLE_NAME); + HTableDescriptor htd = new HTableDescriptor(util.getAdmin().getTableDescriptor(TABLE_NAME)); HColumnDescriptor hcd = htd.getColumnFamilies()[0]; // force mob enabled such that all data is mob data hcd.setMobEnabled(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index f84d9c2460b..c5681b12aad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -526,7 +526,7 @@ public class TestAdmin1 { expectedException = true; } assertFalse(expectedException); - HTableDescriptor modifiedHtd = this.admin.getTableDescriptor(tableName); + HTableDescriptor modifiedHtd = new HTableDescriptor(this.admin.getTableDescriptor(tableName)); assertFalse(htd.equals(modifiedHtd)); assertTrue(copy.equals(modifiedHtd)); assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 3201fbedd39..c0ccd5e018d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -725,7 +725,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase { admin.modifyColumnFamily(tableName, cfDescriptor).join(); TableDescriptor htd = admin.getTableDescriptor(tableName).get(); - HColumnDescriptor hcfd = htd.getFamily(FAMILY_0); + ColumnFamilyDescriptor hcfd = htd.getColumnFamily(FAMILY_0); assertTrue(hcfd.getBlocksize() == newBlockSize); } finally { admin.deleteTable(tableName).join(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java index f3797d15c77..3b5522bc47b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java @@ -104,7 +104,7 @@ public class TestReplicationAdminWithClusters extends TestReplicationBase { @Test(timeout = 300000) public void testEnableReplicationWhenReplicationNotEnabled() throws Exception { - HTableDescriptor table = admin1.getTableDescriptor(tableName); + HTableDescriptor table = new HTableDescriptor(admin1.getTableDescriptor(tableName)); for (HColumnDescriptor fam : table.getColumnFamilies()) { fam.setScope(HConstants.REPLICATION_SCOPE_LOCAL); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 
59e8fb3ee24..569b1700689 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -172,7 +172,7 @@ public class TestCatalogJanitor { */ private TableDescriptor createTableDescriptorForCurrentMethod() { return TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())). - addFamily(new HColumnDescriptor(MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME)). + addColumnFamily(new HColumnDescriptor(MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME)). build(); }
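
Taken together, these hunks move callers from the mutable HColumnDescriptor/HTableDescriptor setters to immutable descriptors produced by ColumnFamilyDescriptorBuilder and TableDescriptorBuilder. A minimal sketch of the resulting client-side pattern, using only builder methods exercised in the tests above (the table name "t1" and family name "cf" are illustrative, not taken from the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorBuilderSketch {
  public static void main(String[] args) throws HBaseException {
    // Build an immutable column family descriptor; setTimeToLive(String)
    // accepts the human-readable forms asserted in testSetTimeToLive.
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))              // "cf" is illustrative
        .setBlocksize(1000)
        .setTimeToLive("1 HOUR 10 minutes 1 second")  // parses to 4201 seconds
        .build();

    // Assemble an immutable table descriptor via the renamed
    // addColumnFamily/getColumnFamily methods.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("t1"))          // "t1" is illustrative
        .addColumnFamily(family)
        .build();

    System.out.println(table.getColumnFamily(Bytes.toBytes("cf")).getTimeToLive()); // 4201
  }
}

Replacing a family afterwards goes through modifyColumnFamily on a new builder seeded from the existing descriptor, exactly as testModifyFamily does, since the built descriptors themselves reject mutation.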