From b77dca0c7fdeaf695c06bd156646171b1505f564 Mon Sep 17 00:00:00 2001 From: mbautin Date: Thu, 23 Feb 2012 23:18:27 +0000 Subject: [PATCH] [jira] [HBASE-5357] Refactoring: use the builder pattern for HColumnDescriptor Summary: Deprecate three ugly HColumnDescriptor constructors and use a "builder" pattern instead, e.g. new HColumnDescriptor(cfName).setMaxVersions(5).setBlockSize(8192), etc. Setters have now become "builder" methods. Test Plan: Run unit tests Reviewers: JIRA, todd, stack, tedyu, Kannan, Karthik, Liyin Reviewed By: tedyu Differential Revision: https://reviews.facebook.net/D1851 git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1293026 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hbase/HColumnDescriptor.java | 78 ++++++++++++------- .../apache/hadoop/hbase/HTableDescriptor.java | 27 ++++--- .../client/UnmodifyableHColumnDescriptor.java | 14 ++-- .../hadoop/hbase/thrift/ThriftUtilities.java | 10 ++- .../apache/hadoop/hbase/HBaseTestCase.java | 36 +++------ .../hadoop/hbase/HBaseTestingUtility.java | 66 +++------------- .../hadoop/hbase/TestSerialization.java | 27 +++---- .../hbase/client/TestFromClientSide.java | 15 +--- .../hbase/io/encoding/TestEncodedSeekers.java | 10 ++- .../hbase/io/hfile/TestCacheOnWrite.java | 14 +++- .../hfile/TestForceCacheImportantBlocks.java | 10 +-- .../hfile/TestScannerSelectionUsingTTL.java | 9 +-- .../mapreduce/TestHFileOutputFormat.java | 7 +- .../hbase/mapreduce/TestImportExport.java | 34 ++------ .../hbase/regionserver/TestBlocksRead.java | 30 +++---- .../hbase/regionserver/TestColumnSeeking.java | 7 +- .../regionserver/TestFSErrorsExposed.java | 7 +- .../hbase/regionserver/TestHRegion.java | 22 +++--- .../regionserver/TestMultiColumnScanner.java | 11 ++- .../regionserver/TestScanWithBloomError.java | 10 ++- .../hbase/regionserver/TestScanner.java | 12 +-- .../regionserver/TestSeekOptimizations.java | 7 +- .../hbase/regionserver/TestWideScanner.java | 22 ++---- .../TestThriftHBaseServiceHandler.java | 5 +- 24 files changed, 219 insertions(+), 271 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 685a1a6636b..324ec46f34d 100644 --- a/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -244,7 +244,9 @@ public class HColumnDescriptor implements WritableComparable * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters */ + @Deprecated public HColumnDescriptor(final byte [] familyName, final int maxVersions, final String compression, final boolean inMemory, final boolean blockCacheEnabled, @@ -274,7 +276,9 @@ public class HColumnDescriptor implements WritableComparable * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters */ + @Deprecated public HColumnDescriptor(final byte [] familyName, final int maxVersions, final String compression, final boolean inMemory, final boolean blockCacheEnabled, final int blocksize, @@ -312,7 +316,9 @@ public class HColumnDescriptor implements WritableComparable * other than 'word' characters: i.e. 
[a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters */ + @Deprecated public HColumnDescriptor(final byte[] familyName, final int minVersions, final int maxVersions, final boolean keepDeletedCells, final String compression, final boolean encodeOnDisk, @@ -427,10 +433,12 @@ public class HColumnDescriptor implements WritableComparable /** * @param key The key. * @param value The value. + * @return this (for chained invocation) */ - public void setValue(byte[] key, byte[] value) { + public HColumnDescriptor setValue(byte[] key, byte[] value) { values.put(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value)); + return this; } /** @@ -443,9 +451,11 @@ public class HColumnDescriptor implements WritableComparable /** * @param key The key. * @param value The value. + * @return this (for chained invocation) */ - public void setValue(String key, String value) { + public HColumnDescriptor setValue(String key, String value) { setValue(Bytes.toBytes(key), Bytes.toBytes(value)); + return this; } /** @return compression type being used for the column family */ @@ -474,10 +484,12 @@ public class HColumnDescriptor implements WritableComparable /** * @param maxVersions maximum number of versions + * @return this (for chained invocation) */ - public void setMaxVersions(int maxVersions) { + public HColumnDescriptor setMaxVersions(int maxVersions) { setValue(HConstants.VERSIONS, Integer.toString(maxVersions)); cachedMaxVersions = maxVersions; + return this; } /** @@ -495,10 +507,12 @@ public class HColumnDescriptor implements WritableComparable /** * @param s Blocksize to use when writing out storefiles/hfiles on this * column family. + * @return this (for chained invocation) */ - public void setBlocksize(int s) { + public HColumnDescriptor setBlocksize(int s) { setValue(BLOCKSIZE, Integer.toString(s)); this.blocksize = null; + return this; } /** @@ -514,8 +528,9 @@ public class HColumnDescriptor implements WritableComparable * See LZO Compression * for how to enable it. * @param type Compression type setting. + * @return this (for chained invocation) */ - public void setCompressionType(Compression.Algorithm type) { + public HColumnDescriptor setCompressionType(Compression.Algorithm type) { String compressionType; switch (type) { case LZO: compressionType = "LZO"; break; @@ -523,7 +538,7 @@ public class HColumnDescriptor implements WritableComparable case SNAPPY: compressionType = "SNAPPY"; break; default: compressionType = "NONE"; break; } - setValue(COMPRESSION, compressionType); + return setValue(COMPRESSION, compressionType); } /** @return data block encoding algorithm used on disk */ @@ -546,9 +561,10 @@ public class HColumnDescriptor implements WritableComparable /** * Set the flag indicating that we only want to encode data block in cache * but not on disk. + * @return this (for chained invocation) */ - public void setEncodeOnDisk(boolean encodeOnDisk) { - setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk)); + public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) { + return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk)); } /** @@ -566,15 +582,16 @@ public class HColumnDescriptor implements WritableComparable /** * Set data block encoding algorithm used in block cache. * @param type What kind of data block encoding will be used. 
+ * @return this (for chained invocation) */ - public void setDataBlockEncoding(DataBlockEncoding type) { + public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) { String name; if (type != null) { name = type.toString(); } else { name = DataBlockEncoding.NONE.toString(); } - setValue(DATA_BLOCK_ENCODING, name); + return setValue(DATA_BLOCK_ENCODING, name); } /** @@ -590,8 +607,10 @@ public class HColumnDescriptor implements WritableComparable * See LZO Compression * for how to enable it. * @param type Compression type setting. + * @return this (for chained invocation) */ - public void setCompactionCompressionType(Compression.Algorithm type) { + public HColumnDescriptor setCompactionCompressionType( + Compression.Algorithm type) { String compressionType; switch (type) { case LZO: compressionType = "LZO"; break; @@ -599,7 +618,7 @@ public class HColumnDescriptor implements WritableComparable case SNAPPY: compressionType = "SNAPPY"; break; default: compressionType = "NONE"; break; } - setValue(COMPRESSION_COMPACT, compressionType); + return setValue(COMPRESSION_COMPACT, compressionType); } /** @@ -615,9 +634,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param inMemory True if we are to keep all values in the HRegionServer * cache + * @return this (for chained invocation) */ - public void setInMemory(boolean inMemory) { - setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); + public HColumnDescriptor setInMemory(boolean inMemory) { + return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); } public boolean getKeepDeletedCells() { @@ -631,9 +651,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param keepDeletedCells True if deleted rows should not be collected * immediately. + * @return this (for chained invocation) */ - public void setKeepDeletedCells(boolean keepDeletedCells) { - setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells)); + public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) { + return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells)); } /** @@ -646,9 +667,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param timeToLive Time-to-live of cell contents, in seconds. + * @return this (for chained invocation) */ - public void setTimeToLive(int timeToLive) { - setValue(TTL, Integer.toString(timeToLive)); + public HColumnDescriptor setTimeToLive(int timeToLive) { + return setValue(TTL, Integer.toString(timeToLive)); } /** @@ -662,9 +684,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param minVersions The minimum number of versions to keep. * (used when timeToLive is set) + * @return this (for chained invocation) */ - public void setMinVersions(int minVersions) { - setValue(MIN_VERSIONS, Integer.toString(minVersions)); + public HColumnDescriptor setMinVersions(int minVersions) { + return setValue(MIN_VERSIONS, Integer.toString(minVersions)); } /** @@ -679,9 +702,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param blockCacheEnabled True if MapFile blocks should be cached. 
+ * @return this (for chained invocation) */ - public void setBlockCacheEnabled(boolean blockCacheEnabled) { - setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); + public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { + return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); } /** @@ -697,9 +721,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param bt bloom filter type + * @return this (for chained invocation) */ - public void setBloomFilterType(final StoreFile.BloomType bt) { - setValue(BLOOMFILTER, bt.toString()); + public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) { + return setValue(BLOOMFILTER, bt.toString()); } /** @@ -715,9 +740,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param scope the scope tag + * @return this (for chained invocation) */ - public void setScope(int scope) { - setValue(REPLICATION_SCOPE, Integer.toString(scope)); + public HColumnDescriptor setScope(int scope) { + return setValue(REPLICATION_SCOPE, Integer.toString(scope)); } /** diff --git a/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 472a22e4a8d..503b9cb35e9 100644 --- a/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -1113,21 +1113,26 @@ public class HTableDescriptor implements WritableComparable { /** Table descriptor for -ROOT- catalog table */ public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( HConstants.ROOT_TABLE_NAME, - new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY, - 10, // Ten is arbitrary number. Keep versions to help debugging. - Compression.Algorithm.NONE.getName(), true, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HConstants.REPLICATION_SCOPE_LOCAL) }); + new HColumnDescriptor[] { + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setTimeToLive(HConstants.FOREVER) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); /** Table descriptor for .META. catalog table */ public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( HConstants.META_TABLE_NAME, new HColumnDescriptor[] { - new HColumnDescriptor(HConstants.CATALOG_FAMILY, - 10, // Ten is arbitrary number. Keep versions to help debugging. - Compression.Algorithm.NONE.getName(), true, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HConstants.REPLICATION_SCOPE_LOCAL)}); - + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); public void setOwner(User owner) { setOwnerString(owner != null ? 
owner.getShortName() : null); diff --git a/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java b/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java index d99f02a1953..301ea12b52b 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java +++ b/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java @@ -39,7 +39,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[]) */ @Override - public void setValue(byte[] key, byte[] value) { + public HColumnDescriptor setValue(byte[] key, byte[] value) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -47,7 +47,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String) */ @Override - public void setValue(String key, String value) { + public HColumnDescriptor setValue(String key, String value) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -55,7 +55,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int) */ @Override - public void setMaxVersions(int maxVersions) { + public HColumnDescriptor setMaxVersions(int maxVersions) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -63,7 +63,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean) */ @Override - public void setInMemory(boolean inMemory) { + public HColumnDescriptor setInMemory(boolean inMemory) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -71,7 +71,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean) */ @Override - public void setBlockCacheEnabled(boolean blockCacheEnabled) { + public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -79,7 +79,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int) */ @Override - public void setTimeToLive(int timeToLive) { + public HColumnDescriptor setTimeToLive(int timeToLive) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -87,7 +87,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression.Algorithm) */ @Override - public void setCompressionType(Compression.Algorithm type) { + public HColumnDescriptor setCompressionType(Compression.Algorithm type) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } } \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java index 790f034f071..d7fa95e5796 100644 --- a/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java +++ b/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java @@ -57,9 +57,13 @@ public class ThriftUtilities { throw new IllegalArgument("column name is empty"); } byte [] 
parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0]; - HColumnDescriptor col = new HColumnDescriptor(parsedName, - in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled, - in.timeToLive, bt.toString()); + HColumnDescriptor col = new HColumnDescriptor(parsedName) + .setMaxVersions(in.maxVersions) + .setCompressionType(comp) + .setInMemory(in.inMemory) + .setBlockCacheEnabled(in.blockCacheEnabled) + .setTimeToLive(in.timeToLive) + .setBloomFilterType(bt); return col; } diff --git a/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index e3ddc29b0cd..5fb9e2c81bc 100644 --- a/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -220,33 +220,15 @@ public abstract class HBaseTestCase extends TestCase { protected HTableDescriptor createTableDescriptor(final String name, final int minVersions, final int versions, final int ttl, boolean keepDeleted) { HTableDescriptor htd = new HTableDescriptor(name); - htd.addFamily(new HColumnDescriptor(fam1, minVersions, versions, - keepDeleted, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam2, minVersions, versions, - keepDeleted, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam3, minVersions, versions, - keepDeleted, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) { + htd.addFamily(new HColumnDescriptor(cfName) + .setMinVersions(minVersions) + .setMaxVersions(versions) + .setKeepDeletedCells(keepDeleted) + .setBlockCacheEnabled(false) + .setTimeToLive(ttl) + ); + } return htd; } diff --git a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index ec9d581c7f5..849e95652f1 100644 --- a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -67,11 +66,9 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.security.User; 
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.Keying; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Writables; @@ -775,13 +772,8 @@ public class HBaseTestingUtility { throws IOException{ HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc, startKey, endKey, numRegions); @@ -821,13 +813,8 @@ public class HBaseTestingUtility { throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for(byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); @@ -860,13 +847,8 @@ public class HBaseTestingUtility { throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); @@ -885,13 +867,9 @@ public class HBaseTestingUtility { int numVersions, int blockSize) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - blockSize, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions) + .setBlocksize(blockSize); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); @@ -912,13 +890,8 @@ public class HBaseTestingUtility { HTableDescriptor desc = new HTableDescriptor(tableName); int i = 0; for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i], - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + 
.setMaxVersions(numVersions[i]); desc.addFamily(hcd); i++; } @@ -1930,23 +1903,6 @@ public class HBaseTestingUtility { return hloc.getPort(); } - public HRegion createTestRegion(String tableName, String cfName, - Compression.Algorithm comprAlgo, BloomType bloomType, int maxVersions, - int blockSize, DataBlockEncoding encoding, boolean encodeOnDisk) - throws IOException { - HColumnDescriptor hcd = - new HColumnDescriptor(Bytes.toBytes(cfName), maxVersions, - comprAlgo.getName(), - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_TTL, - bloomType.toString()); - hcd.setBlocksize(blockSize); - hcd.setDataBlockEncoding(encoding); - hcd.setEncodeOnDisk(encodeOnDisk); - return createTestRegion(tableName, hcd); - } - public HRegion createTestRegion(String tableName, HColumnDescriptor hcd) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); diff --git a/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 0d7d0d2ead8..21c92fff72e 100644 --- a/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -558,21 +558,18 @@ public class TestSerialization { protected HTableDescriptor createTableDescriptor(final String name, final int versions) { HTableDescriptor htd = new HTableDescriptor(name); - htd.addFamily(new HColumnDescriptor(fam1, versions, - HColumnDescriptor.DEFAULT_COMPRESSION, false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam2, versions, - HColumnDescriptor.DEFAULT_COMPRESSION, false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam3, versions, - HColumnDescriptor.DEFAULT_COMPRESSION, false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + htd.addFamily(new HColumnDescriptor(fam1) + .setMaxVersions(versions) + .setBlockCacheEnabled(false) + ); + htd.addFamily(new HColumnDescriptor(fam2) + .setMaxVersions(versions) + .setBlockCacheEnabled(false) + ); + htd.addFamily(new HColumnDescriptor(fam3) + .setMaxVersions(versions) + .setBlockCacheEnabled(false) + ); return htd; } diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index a10f31c78a4..bdeaefefb94 100644 --- a/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -157,19 +157,8 @@ public class TestFromClientSide { final byte[] T1 = Bytes.toBytes("T1"); final byte[] T2 = Bytes.toBytes("T2"); final byte[] T3 = Bytes.toBytes("T3"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY, - HColumnDescriptor.DEFAULT_MIN_VERSIONS, - HColumnDescriptor.DEFAULT_VERSIONS, - true, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + 
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) + .setKeepDeletedCells(true); HTableDescriptor desc = new HTableDescriptor(TABLENAME); desc.addFamily(hcd); diff --git a/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index 26c76f9e1d9..f40afe48b7b 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ b/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -94,9 +95,12 @@ public class TestEncodedSeekers { new CacheConfig(testUtil.getConfiguration()).getBlockCache(); cache.clearCache(); - HRegion region = testUtil.createTestRegion(TABLE_NAME, CF_NAME, - Algorithm.NONE, BloomType.NONE, MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE, - encoding, encodeOnDisk); + HRegion region = testUtil.createTestRegion( + TABLE_NAME, new HColumnDescriptor(CF_NAME) + .setMaxVersions(MAX_VERSIONS) + .setDataBlockEncoding(encoding) + .setEncodeOnDisk(encodeOnDisk) + ); LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator( MIN_VALUE_SIZE, MAX_VALUE_SIZE); diff --git a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 995a26e1f14..687695a7d36 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Put; @@ -315,10 +316,15 @@ public class TestCacheOnWrite { final String cf = "myCF"; final byte[] cfBytes = Bytes.toBytes(cf); final int maxVersions = 3; - HRegion region = TEST_UTIL.createTestRegion(table, cf, compress, - BLOOM_TYPE, maxVersions, HFile.DEFAULT_BLOCKSIZE, - encoder.getEncodingInCache(), - encoder.getEncodingOnDisk() != DataBlockEncoding.NONE); + HRegion region = TEST_UTIL.createTestRegion(table, + new HColumnDescriptor(cf) + .setCompressionType(compress) + .setBloomFilterType(BLOOM_TYPE) + .setMaxVersions(maxVersions) + .setDataBlockEncoding(encoder.getEncodingInCache()) + .setEncodeOnDisk(encoder.getEncodingOnDisk() != + DataBlockEncoding.NONE) + ); int rowIdx = 0; long ts = EnvironmentEdgeManager.currentTimeMillis(); for (int iFile = 0; iFile < 5; ++iFile) { diff --git a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 9e3833255fa..5f8214e0db1 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -104,12 +104,10 @@ public class TestForceCacheImportantBlocks { SchemaMetrics.setUseTableNameInTest(false); HColumnDescriptor hcd = - new HColumnDescriptor(Bytes.toBytes(CF), MAX_VERSIONS, - COMPRESSION_ALGORITHM.getName(), - 
HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_TTL, - BLOOM_TYPE.toString()); + new HColumnDescriptor(Bytes.toBytes(CF)) + .setMaxVersions(MAX_VERSIONS) + .setCompressionType(COMPRESSION_ALGORITHM) + .setBloomFilterType(BLOOM_TYPE); hcd.setBlocksize(BLOCK_SIZE); hcd.setBlockCacheEnabled(cfCacheEnabled); HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd); diff --git a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 52a1daae0a4..17284a3880a 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -99,12 +99,9 @@ public class TestScannerSelectionUsingTTL { @Test public void testScannerSelection() throws IOException { HColumnDescriptor hcd = - new HColumnDescriptor(FAMILY_BYTES, Integer.MAX_VALUE, - Compression.Algorithm.NONE.getName(), - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - TTL_SECONDS, - BloomType.NONE.toString()); + new HColumnDescriptor(FAMILY_BYTES) + .setMaxVersions(Integer.MAX_VALUE) + .setTimeToLive(TTL_SECONDS); HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE)); diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index b606fad484e..a6bcbac4495 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -511,8 +511,11 @@ public class TestHFileOutputFormat { { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME); for (Entry entry : familyToCompression.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey().getBytes(), 1, entry.getValue().getName(), - false, false, 0, "none")); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) + .setMaxVersions(1) + .setCompressionType(entry.getValue()) + .setBlockCacheEnabled(false) + .setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 0029d948461..a3faeb3380b 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -145,19 +145,10 @@ public class TestImportExport { public void testWithDeletes() throws Exception { String EXPORT_TABLE = "exportWithDeletes"; HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE); - desc.addFamily(new HColumnDescriptor(FAMILYA, - HColumnDescriptor.DEFAULT_MIN_VERSIONS, - 5, /* versions */ - true /* keep deleted cells */, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + desc.addFamily(new HColumnDescriptor(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(true) + ); 
UTIL.getHBaseAdmin().createTable(desc); HTable t = new HTable(UTIL.getConfiguration(), EXPORT_TABLE); @@ -193,19 +184,10 @@ public class TestImportExport { String IMPORT_TABLE = "importWithDeletes"; desc = new HTableDescriptor(IMPORT_TABLE); - desc.addFamily(new HColumnDescriptor(FAMILYA, - HColumnDescriptor.DEFAULT_MIN_VERSIONS, - 5, /* versions */ - true /* keep deleted cells */, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + desc.addFamily(new HColumnDescriptor(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(true) + ); UTIL.getHBaseAdmin().createTable(desc); t.close(); t = new HTable(UTIL.getConfiguration(), IMPORT_TABLE); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index 6ccb109098e..bd83a7116b4 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -28,7 +28,14 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestCase; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -36,7 +43,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.io.hfile.LruBlockCache; +import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.junit.Test; @@ -45,7 +52,8 @@ import org.junit.experimental.categories.Category; @Category(MediumTests.class) public class TestBlocksRead extends HBaseTestCase { static final Log LOG = LogFactory.getLog(TestBlocksRead.class); - static final String[] BLOOM_TYPE = new String[] { "ROWCOL", "ROW", "NONE" }; + static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL, + BloomType.ROW, BloomType.NONE }; private static BlockCache blockCache; @@ -82,16 +90,10 @@ public class TestBlocksRead extends HBaseTestCase { HTableDescriptor htd = new HTableDescriptor(tableName); HColumnDescriptor familyDesc; for (int i = 0; i < BLOOM_TYPE.length; i++) { - String bloomType = BLOOM_TYPE[i]; - familyDesc = new HColumnDescriptor( - Bytes.toBytes(family + "_" + bloomType), - HColumnDescriptor.DEFAULT_VERSIONS, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - 1, // small block size deliberate; each kv on its own block - HColumnDescriptor.DEFAULT_TTL, BLOOM_TYPE[i], - 
HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + BloomType bloomType = BLOOM_TYPE[i]; + familyDesc = new HColumnDescriptor(family + "_" + bloomType) + .setBlocksize(1) + .setBloomFilterType(BLOOM_TYPE[i]); htd.addFamily(familyDesc); } @@ -138,7 +140,7 @@ public class TestBlocksRead extends HBaseTestCase { KeyValue[] kvs = null; for (int i = 0; i < BLOOM_TYPE.length; i++) { - String bloomType = BLOOM_TYPE[i]; + BloomType bloomType = BLOOM_TYPE[i]; byte[] cf = Bytes.toBytes(family + "_" + bloomType); long blocksStart = getBlkAccessCount(cf); Get get = new Get(Bytes.toBytes(row)); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index 3a9c9fb38b2..84296ab6f36 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -55,12 +55,7 @@ public class TestColumnSeeking { String table = "TestDuplicateVersions"; HColumnDescriptor hcd = - new HColumnDescriptor(familyBytes, 1000, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER); + new HColumnDescriptor(familyBytes).setMaxVersions(1000); HTableDescriptor htd = new HTableDescriptor(table); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 91b869e59fd..eebef460f0d 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -168,9 +168,10 @@ public class TestFSErrorsExposed { HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor( - fam, 1, HColumnDescriptor.DEFAULT_COMPRESSION, - false, false, HConstants.FOREVER, "NONE")); + desc.addFamily(new HColumnDescriptor(fam) + .setMaxVersions(1) + .setBlockCacheEnabled(false) + ); admin.createTable(desc); // Make it fail faster. 
util.getConfiguration().setInt("hbase.client.retries.number", 1); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 97c624d41a0..8db43a41ae0 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -3033,9 +3033,9 @@ public class TestHRegion extends HBaseTestCase { byte [] qf1 = Bytes.toBytes("col"); byte [] val1 = Bytes.toBytes("value1"); // Create Table - HColumnDescriptor hcd = new HColumnDescriptor(fam1, Integer.MAX_VALUE, - HColumnDescriptor.DEFAULT_COMPRESSION, false, true, - HColumnDescriptor.DEFAULT_TTL, "rowcol"); + HColumnDescriptor hcd = new HColumnDescriptor(fam1) + .setMaxVersions(Integer.MAX_VALUE) + .setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(hcd); @@ -3089,13 +3089,9 @@ public class TestHRegion extends HBaseTestCase { byte [] FAMILY = Bytes.toBytes("family"); //Create table - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY, Integer.MAX_VALUE, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - "rowcol", - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) + .setMaxVersions(Integer.MAX_VALUE) + .setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); @@ -3138,9 +3134,9 @@ public class TestHRegion extends HBaseTestCase { byte [] familyName = Bytes.toBytes("familyName"); // Create Table - HColumnDescriptor hcd = new HColumnDescriptor(familyName, Integer.MAX_VALUE, - HColumnDescriptor.DEFAULT_COMPRESSION, false, true, - HColumnDescriptor.DEFAULT_TTL, "rowcol"); + HColumnDescriptor hcd = new HColumnDescriptor(familyName) + .setMaxVersions(Integer.MAX_VALUE) + .setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(hcd); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 3de3ec5d4f4..b716c53c0ee 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -39,6 +39,7 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.MediumTests; @@ -150,9 +151,13 @@ public class TestMultiColumnScanner { @Test public void testMultiColumnScanner() throws IOException { - HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, FAMILY, comprAlgo, - bloomType, MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE, - dataBlockEncoding, true); + HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, + new HColumnDescriptor(FAMILY) + .setCompressionType(comprAlgo) + .setBloomFilterType(bloomType) + .setMaxVersions(MAX_VERSIONS) + .setDataBlockEncoding(dataBlockEncoding) + ); List rows = sequentialStrings("row", NUM_ROWS); List qualifiers = 
sequentialStrings("qual", NUM_COLUMNS); List kvs = new ArrayList(); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 786bf8254f4..bdb1231d97a 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.SmallTests; @@ -102,10 +103,11 @@ public class TestScanWithBloomError { @Test public void testThreeStoreFiles() throws IOException { - region = TEST_UTIL.createTestRegion(TABLE_NAME, FAMILY, - Compression.Algorithm.GZ, bloomType, - TestMultiColumnScanner.MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE, - DataBlockEncoding.NONE, true); + region = TEST_UTIL.createTestRegion(TABLE_NAME, + new HColumnDescriptor(FAMILY) + .setCompressionType(Compression.Algorithm.GZ) + .setBloomFilterType(bloomType) + .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS)); createStoreFile(new int[] {1, 2, 6}); createStoreFile(new int[] {1, 2, 3, 7}); createStoreFile(new int[] {1, 9}); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 6e95bb24d0b..32e8d189fcf 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -67,11 +67,13 @@ public class TestScanner extends HBaseTestCase { static final HTableDescriptor TESTTABLEDESC = new HTableDescriptor("testscanner"); static { - TESTTABLEDESC.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY, - 10, // Ten is arbitrary number. Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HConstants.REPLICATION_SCOPE_LOCAL)); + TESTTABLEDESC.addFamily( + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is an arbitrary number. Keep versions to help debugging. 
+ .setMaxVersions(10) + .setBlockCacheEnabled(false) + .setBlocksize(8 * 1024) + ); } /** HRegionInfo for root region */ public static final HRegionInfo REGION_INFO = diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index 9a77ebfeec1..2a092e7f883 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -37,6 +37,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; @@ -142,8 +143,10 @@ public class TestSeekOptimizations { @Test public void testMultipleTimestampRanges() throws IOException { region = TEST_UTIL.createTestRegion(TestSeekOptimizations.class.getName(), - FAMILY, comprAlgo, bloomType, Integer.MAX_VALUE, - HFile.DEFAULT_BLOCKSIZE, DataBlockEncoding.NONE, true); + new HColumnDescriptor(FAMILY) + .setCompressionType(comprAlgo) + .setBloomFilterType(bloomType) + ); // Delete the given timestamp and everything before. final long latestDelTS = USE_MANY_STORE_FILES ? 1397 : -1; diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index 5ce665371a6..09d3151cd21 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -48,21 +48,13 @@ public class TestWideScanner extends HBaseTestCase { static final HTableDescriptor TESTTABLEDESC = new HTableDescriptor("testwidescan"); static { - TESTTABLEDESC.addFamily(new HColumnDescriptor(A, - 100, // Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE)); - TESTTABLEDESC.addFamily(new HColumnDescriptor(B, - 100, // Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE)); - TESTTABLEDESC.addFamily(new HColumnDescriptor(C, - 100, // Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE)); + for (byte[] cfName : new byte[][] { A, B, C }) { + TESTTABLEDESC.addFamily(new HColumnDescriptor(cfName) + // Keep versions to help debugging. 
+ .setMaxVersions(100) + .setBlocksize(8 * 1024) + ); + } } /** HRegionInfo for root region */ diff --git a/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 3aa49507ba2..b848ac73292 100644 --- a/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -87,8 +87,9 @@ public class TestThriftHBaseServiceHandler { private static byte[] valueBname = Bytes.toBytes("valueB"); private static HColumnDescriptor[] families = new HColumnDescriptor[] { new HColumnDescriptor(familyAname), - new HColumnDescriptor(familyBname, 2, HColumnDescriptor.DEFAULT_COMPRESSION, HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER) }; + new HColumnDescriptor(familyBname) + .setMaxVersions(2) + }; public void assertTColumnValuesEqual(List columnValuesA, List columnValuesB) { assertEquals(columnValuesA.size(), columnValuesB.size());
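
Appendix (not part of the applied diff): a minimal before/after sketch of the API change, assuming a hypothetical column family "cf1" and arbitrary settings. The deprecated 7-argument constructor and the chained setters shown are exactly the ones touched by the hunks above; the wrapper class name is invented for illustration.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.hfile.Compression;
    import org.apache.hadoop.hbase.regionserver.StoreFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HColumnDescriptorBuilderExample {

      // Before: one of the now-@Deprecated positional constructors. Every
      // setting must be supplied, even when only one differs from its default.
      @SuppressWarnings("deprecation")
      static HColumnDescriptor oldStyle() {
        return new HColumnDescriptor(Bytes.toBytes("cf1"),
            5,                                     // maxVersions
            Compression.Algorithm.NONE.getName(),  // compression
            false,                                 // inMemory
            true,                                  // blockCacheEnabled
            HConstants.FOREVER,                    // timeToLive
            StoreFile.BloomType.NONE.toString());  // bloomFilter
      }

      // After: the constructor takes only the family name, and each setter
      // returns `this`, so callers chain only the non-default settings.
      static HColumnDescriptor newStyle() {
        return new HColumnDescriptor("cf1")
            .setMaxVersions(5)
            .setBlocksize(8 * 1024)  // note the spelling: setBlocksize
            .setInMemory(false)
            .setBlockCacheEnabled(true)
            .setTimeToLive(HConstants.FOREVER);
      }
    }

This is the same chaining style the test hunks above switch to (e.g. in HBaseTestingUtility, TestImportExport, and the HTableDescriptor catalog-table descriptors), and it is why UnmodifyableHColumnDescriptor's overrides change their return type from void to HColumnDescriptor while continuing to throw UnsupportedOperationException.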