[jira] [HBASE-5357] Refactoring: use the builder pattern for HColumnDescriptor
Summary: Deprecate three ugly HColumnDescriptor constructors and use a "builder" pattern instead, e.g. new HColumnDescriptor(cfName).setMaxVersions(5).setBlockSize(8192), etc. Setters have now become "builder" methods.

Test Plan: Run unit tests

Reviewers: JIRA, todd, stack, tedyu, Kannan, Karthik, Liyin

Reviewed By: tedyu

Differential Revision: https://reviews.facebook.net/D1851

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1293026 13f79535-47bb-0310-9956-ffa450edef68
parent: b0d4403059
commit: b77dca0c7f
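At a glance, the API change the summary describes; a minimal before/after sketch (the table and family names here are made up for illustration, not taken from the patch):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

public class BuilderPatternExample {
  public static void main(String[] args) {
    // Before (now deprecated): a positional constructor whose long argument
    // list is easy to misorder and impossible to extend compatibly:
    //   new HColumnDescriptor(cfName, maxVersions, compression, inMemory,
    //       blockCacheEnabled, blocksize, timeToLive, bloomFilter, scope);

    // After: the one-argument constructor plus chained setters. Note the
    // method is actually spelled setBlocksize in the patch below, not
    // setBlockSize as written in the summary.
    HTableDescriptor table = new HTableDescriptor("exampleTable");
    table.addFamily(new HColumnDescriptor("cf")
        .setMaxVersions(5)
        .setBlocksize(8192)
        .setInMemory(true));
  }
}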
@@ -244,7 +244,9 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
    * a <code>:</code>
    * @throws IllegalArgumentException if the number of versions is <= 0
+   * @deprecated use {@link #HColumnDescriptor(String)} and setters
    */
+  @Deprecated
   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
       final String compression, final boolean inMemory,
       final boolean blockCacheEnabled,
@@ -274,7 +276,9 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
    * a <code>:</code>
    * @throws IllegalArgumentException if the number of versions is <= 0
+   * @deprecated use {@link #HColumnDescriptor(String)} and setters
    */
+  @Deprecated
   public HColumnDescriptor(final byte [] familyName, final int maxVersions,
       final String compression, final boolean inMemory,
       final boolean blockCacheEnabled, final int blocksize,
@@ -312,7 +316,9 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
    * a <code>:</code>
    * @throws IllegalArgumentException if the number of versions is <= 0
+   * @deprecated use {@link #HColumnDescriptor(String)} and setters
    */
+  @Deprecated
   public HColumnDescriptor(final byte[] familyName, final int minVersions,
       final int maxVersions, final boolean keepDeletedCells,
       final String compression, final boolean encodeOnDisk,
@@ -427,10 +433,12 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * @param key The key.
    * @param value The value.
+   * @return this (for chained invocation)
    */
-  public void setValue(byte[] key, byte[] value) {
+  public HColumnDescriptor setValue(byte[] key, byte[] value) {
     values.put(new ImmutableBytesWritable(key),
         new ImmutableBytesWritable(value));
+    return this;
   }
 
   /**
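The same mechanical change repeats through the rest of the class: every setter keeps its old behaviour but now ends by returning the descriptor, either with an explicit return this; or by returning the result of the setValue call it delegates to. A stripped-down sketch of the idiom, with hypothetical class and field names:

// Minimal sketch of the chaining idiom; ChainingExample is a made-up class.
public class ChainingExample {
  private int maxVersions;
  private int blocksize;

  public ChainingExample setMaxVersions(int maxVersions) {
    this.maxVersions = maxVersions;
    return this; // returning 'this' is what makes the calls chainable
  }

  public ChainingExample setBlocksize(int blocksize) {
    this.blocksize = blocksize;
    return this;
  }

  public static void main(String[] args) {
    // Each call returns the same object, so configuration reads as one expression.
    ChainingExample cfg = new ChainingExample().setMaxVersions(5).setBlocksize(8192);
  }
}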
@@ -443,9 +451,11 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * @param key The key.
    * @param value The value.
+   * @return this (for chained invocation)
    */
-  public void setValue(String key, String value) {
+  public HColumnDescriptor setValue(String key, String value) {
     setValue(Bytes.toBytes(key), Bytes.toBytes(value));
+    return this;
   }
 
   /** @return compression type being used for the column family */
@@ -474,10 +484,12 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
 
   /**
    * @param maxVersions maximum number of versions
+   * @return this (for chained invocation)
    */
-  public void setMaxVersions(int maxVersions) {
+  public HColumnDescriptor setMaxVersions(int maxVersions) {
     setValue(HConstants.VERSIONS, Integer.toString(maxVersions));
     cachedMaxVersions = maxVersions;
+    return this;
   }
 
   /**
@@ -495,10 +507,12 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * @param s Blocksize to use when writing out storefiles/hfiles on this
    * column family.
+   * @return this (for chained invocation)
    */
-  public void setBlocksize(int s) {
+  public HColumnDescriptor setBlocksize(int s) {
     setValue(BLOCKSIZE, Integer.toString(s));
     this.blocksize = null;
+    return this;
   }
 
   /**
@@ -514,8 +528,9 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
    * for how to enable it.
    * @param type Compression type setting.
+   * @return this (for chained invocation)
    */
-  public void setCompressionType(Compression.Algorithm type) {
+  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
     String compressionType;
     switch (type) {
       case LZO: compressionType = "LZO"; break;
@@ -523,7 +538,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
       case SNAPPY: compressionType = "SNAPPY"; break;
       default: compressionType = "NONE"; break;
     }
-    setValue(COMPRESSION, compressionType);
+    return setValue(COMPRESSION, compressionType);
   }
 
   /** @return data block encoding algorithm used on disk */
@@ -546,9 +561,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * Set the flag indicating that we only want to encode data block in cache
    * but not on disk.
+   * @return this (for chained invocation)
    */
-  public void setEncodeOnDisk(boolean encodeOnDisk) {
-    setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk));
+  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
+    return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk));
   }
 
   /**
@@ -566,15 +582,16 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * Set data block encoding algorithm used in block cache.
    * @param type What kind of data block encoding will be used.
+   * @return this (for chained invocation)
    */
-  public void setDataBlockEncoding(DataBlockEncoding type) {
+  public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) {
     String name;
     if (type != null) {
       name = type.toString();
     } else {
       name = DataBlockEncoding.NONE.toString();
     }
-    setValue(DATA_BLOCK_ENCODING, name);
+    return setValue(DATA_BLOCK_ENCODING, name);
   }
 
   /**
@@ -590,8 +607,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * See <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO Compression</a>
    * for how to enable it.
    * @param type Compression type setting.
+   * @return this (for chained invocation)
    */
-  public void setCompactionCompressionType(Compression.Algorithm type) {
+  public HColumnDescriptor setCompactionCompressionType(
+      Compression.Algorithm type) {
     String compressionType;
     switch (type) {
       case LZO: compressionType = "LZO"; break;
@@ -599,7 +618,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
       case SNAPPY: compressionType = "SNAPPY"; break;
       default: compressionType = "NONE"; break;
     }
-    setValue(COMPRESSION_COMPACT, compressionType);
+    return setValue(COMPRESSION_COMPACT, compressionType);
   }
 
   /**
@@ -615,9 +634,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * @param inMemory True if we are to keep all values in the HRegionServer
    * cache
+   * @return this (for chained invocation)
    */
-  public void setInMemory(boolean inMemory) {
-    setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
+  public HColumnDescriptor setInMemory(boolean inMemory) {
+    return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory));
   }
 
   public boolean getKeepDeletedCells() {
@@ -631,9 +651,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * @param keepDeletedCells True if deleted rows should not be collected
    * immediately.
+   * @return this (for chained invocation)
    */
-  public void setKeepDeletedCells(boolean keepDeletedCells) {
-    setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
+  public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
+    return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells));
   }
 
   /**
@@ -646,9 +667,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
 
   /**
    * @param timeToLive Time-to-live of cell contents, in seconds.
+   * @return this (for chained invocation)
    */
-  public void setTimeToLive(int timeToLive) {
-    setValue(TTL, Integer.toString(timeToLive));
+  public HColumnDescriptor setTimeToLive(int timeToLive) {
+    return setValue(TTL, Integer.toString(timeToLive));
   }
 
   /**
@@ -662,9 +684,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
   /**
    * @param minVersions The minimum number of versions to keep.
    * (used when timeToLive is set)
+   * @return this (for chained invocation)
    */
-  public void setMinVersions(int minVersions) {
-    setValue(MIN_VERSIONS, Integer.toString(minVersions));
+  public HColumnDescriptor setMinVersions(int minVersions) {
+    return setValue(MIN_VERSIONS, Integer.toString(minVersions));
   }
 
   /**
@@ -679,9 +702,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
 
   /**
    * @param blockCacheEnabled True if MapFile blocks should be cached.
+   * @return this (for chained invocation)
    */
-  public void setBlockCacheEnabled(boolean blockCacheEnabled) {
-    setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
+  public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
+    return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled));
   }
 
   /**
@@ -697,9 +721,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
 
   /**
    * @param bt bloom filter type
+   * @return this (for chained invocation)
    */
-  public void setBloomFilterType(final StoreFile.BloomType bt) {
-    setValue(BLOOMFILTER, bt.toString());
+  public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) {
+    return setValue(BLOOMFILTER, bt.toString());
  }
 
   /**
@@ -715,9 +740,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
 
   /**
    * @param scope the scope tag
+   * @return this (for chained invocation)
    */
-  public void setScope(int scope) {
-    setValue(REPLICATION_SCOPE, Integer.toString(scope));
+  public HColumnDescriptor setScope(int scope) {
+    return setValue(REPLICATION_SCOPE, Integer.toString(scope));
   }
 
   /**
@@ -1113,21 +1113,26 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
   /** Table descriptor for <core>-ROOT-</code> catalog table */
   public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
       HConstants.ROOT_TABLE_NAME,
-      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY,
-          10, // Ten is arbitrary number. Keep versions to help debugging.
-          Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
-          HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
-          HConstants.REPLICATION_SCOPE_LOCAL) });
+      new HColumnDescriptor[] {
+          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+              // Ten is arbitrary number. Keep versions to help debugging.
+              .setMaxVersions(10)
+              .setInMemory(true)
+              .setBlocksize(8 * 1024)
+              .setTimeToLive(HConstants.FOREVER)
+              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+      });
 
   /** Table descriptor for <code>.META.</code> catalog table */
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
-      new HColumnDescriptor(HConstants.CATALOG_FAMILY,
-          10, // Ten is arbitrary number. Keep versions to help debugging.
-          Compression.Algorithm.NONE.getName(), true, true, 8 * 1024,
-          HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
-          HConstants.REPLICATION_SCOPE_LOCAL)});
-
+      new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+          // Ten is arbitrary number. Keep versions to help debugging.
+          .setMaxVersions(10)
+          .setInMemory(true)
+          .setBlocksize(8 * 1024)
+          .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+      });
 
   public void setOwner(User owner) {
     setOwnerString(owner != null ? owner.getShortName() : null);
@@ -39,7 +39,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[])
    */
   @Override
-  public void setValue(byte[] key, byte[] value) {
+  public HColumnDescriptor setValue(byte[] key, byte[] value) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 
@@ -47,7 +47,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String)
    */
   @Override
-  public void setValue(String key, String value) {
+  public HColumnDescriptor setValue(String key, String value) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 
@@ -55,7 +55,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int)
    */
   @Override
-  public void setMaxVersions(int maxVersions) {
+  public HColumnDescriptor setMaxVersions(int maxVersions) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 
@@ -63,7 +63,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)
    */
   @Override
-  public void setInMemory(boolean inMemory) {
+  public HColumnDescriptor setInMemory(boolean inMemory) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 
@@ -71,7 +71,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean)
    */
   @Override
-  public void setBlockCacheEnabled(boolean blockCacheEnabled) {
+  public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 
@@ -79,7 +79,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int)
    */
   @Override
-  public void setTimeToLive(int timeToLive) {
+  public HColumnDescriptor setTimeToLive(int timeToLive) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 
@@ -87,7 +87,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
    * @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression.Algorithm)
    */
   @Override
-  public void setCompressionType(Compression.Algorithm type) {
+  public HColumnDescriptor setCompressionType(Compression.Algorithm type) {
     throw new UnsupportedOperationException("HColumnDescriptor is read-only");
   }
 }
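The UnmodifyableHColumnDescriptor overrides above have to change along with the parent class: Java requires an override's return type to be compatible with the overridden method's, so once the base setters return HColumnDescriptor, a void override would no longer compile. A minimal sketch of the rule, with made-up class names:

class Base {
  Base setValue(String key, String value) { return this; }
}

class ReadOnly extends Base {
  // Declaring 'void setValue(...)' here would be a compile error;
  // the override must return Base (or a subtype of it).
  @Override
  Base setValue(String key, String value) {
    throw new UnsupportedOperationException("read-only");
  }
}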
@@ -57,9 +57,13 @@ public class ThriftUtilities {
       throw new IllegalArgument("column name is empty");
     }
     byte [] parsedName = KeyValue.parseColumn(Bytes.getBytes(in.name))[0];
-    HColumnDescriptor col = new HColumnDescriptor(parsedName,
-        in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled,
-        in.timeToLive, bt.toString());
+    HColumnDescriptor col = new HColumnDescriptor(parsedName)
+        .setMaxVersions(in.maxVersions)
+        .setCompressionType(comp)
+        .setInMemory(in.inMemory)
+        .setBlockCacheEnabled(in.blockCacheEnabled)
+        .setTimeToLive(in.timeToLive)
+        .setBloomFilterType(bt);
     return col;
   }
 
@@ -220,33 +220,15 @@ public abstract class HBaseTestCase extends TestCase {
   protected HTableDescriptor createTableDescriptor(final String name,
       final int minVersions, final int versions, final int ttl, boolean keepDeleted) {
     HTableDescriptor htd = new HTableDescriptor(name);
-    htd.addFamily(new HColumnDescriptor(fam1, minVersions, versions,
-        keepDeleted,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_ENCODE_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING,
-        false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
-    htd.addFamily(new HColumnDescriptor(fam2, minVersions, versions,
-        keepDeleted,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_ENCODE_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING,
-        false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
-    htd.addFamily(new HColumnDescriptor(fam3, minVersions, versions,
-        keepDeleted,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_ENCODE_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING,
-        false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
+    for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) {
+      htd.addFamily(new HColumnDescriptor(cfName)
+          .setMinVersions(minVersions)
+          .setMaxVersions(versions)
+          .setKeepDeletedCells(keepDeleted)
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(ttl)
+      );
+    }
     return htd;
   }
 
@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -67,11 +66,9 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Keying;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
@@ -775,13 +772,8 @@ public class HBaseTestingUtility {
       throws IOException{
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
-          HColumnDescriptor.DEFAULT_COMPRESSION,
-          HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE,
-          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
-          HColumnDescriptor.DEFAULT_BLOOMFILTER,
-          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      HColumnDescriptor hcd = new HColumnDescriptor(family)
+          .setMaxVersions(numVersions);
       desc.addFamily(hcd);
     }
     getHBaseAdmin().createTable(desc, startKey, endKey, numRegions);
@@ -821,13 +813,8 @@ public class HBaseTestingUtility {
       throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for(byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
-          HColumnDescriptor.DEFAULT_COMPRESSION,
-          HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE,
-          HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL,
-          HColumnDescriptor.DEFAULT_BLOOMFILTER,
-          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      HColumnDescriptor hcd = new HColumnDescriptor(family)
+          .setMaxVersions(numVersions);
       desc.addFamily(hcd);
     }
     getHBaseAdmin().createTable(desc);
@@ -860,13 +847,8 @@ public class HBaseTestingUtility {
       throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
-          HColumnDescriptor.DEFAULT_COMPRESSION,
-          HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE,
-          HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL,
-          HColumnDescriptor.DEFAULT_BLOOMFILTER,
-          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      HColumnDescriptor hcd = new HColumnDescriptor(family)
+          .setMaxVersions(numVersions);
       desc.addFamily(hcd);
     }
     getHBaseAdmin().createTable(desc);
@@ -885,13 +867,9 @@ public class HBaseTestingUtility {
       int numVersions, int blockSize) throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions,
-          HColumnDescriptor.DEFAULT_COMPRESSION,
-          HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE,
-          blockSize, HColumnDescriptor.DEFAULT_TTL,
-          HColumnDescriptor.DEFAULT_BLOOMFILTER,
-          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      HColumnDescriptor hcd = new HColumnDescriptor(family)
+          .setMaxVersions(numVersions)
+          .setBlocksize(blockSize);
       desc.addFamily(hcd);
     }
     getHBaseAdmin().createTable(desc);
@@ -912,13 +890,8 @@ public class HBaseTestingUtility {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     int i = 0;
     for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
-          HColumnDescriptor.DEFAULT_COMPRESSION,
-          HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE,
-          HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL,
-          HColumnDescriptor.DEFAULT_BLOOMFILTER,
-          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      HColumnDescriptor hcd = new HColumnDescriptor(family)
+          .setMaxVersions(numVersions[i]);
       desc.addFamily(hcd);
       i++;
     }
@@ -1930,23 +1903,6 @@ public class HBaseTestingUtility {
     return hloc.getPort();
   }
 
-  public HRegion createTestRegion(String tableName, String cfName,
-      Compression.Algorithm comprAlgo, BloomType bloomType, int maxVersions,
-      int blockSize, DataBlockEncoding encoding, boolean encodeOnDisk)
-      throws IOException {
-    HColumnDescriptor hcd =
-        new HColumnDescriptor(Bytes.toBytes(cfName), maxVersions,
-            comprAlgo.getName(),
-            HColumnDescriptor.DEFAULT_IN_MEMORY,
-            HColumnDescriptor.DEFAULT_BLOCKCACHE,
-            HColumnDescriptor.DEFAULT_TTL,
-            bloomType.toString());
-    hcd.setBlocksize(blockSize);
-    hcd.setDataBlockEncoding(encoding);
-    hcd.setEncodeOnDisk(encodeOnDisk);
-    return createTestRegion(tableName, hcd);
-  }
-
   public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
       throws IOException {
     HTableDescriptor htd = new HTableDescriptor(tableName);
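With the eight-argument createTestRegion overload gone, callers migrate by building the HColumnDescriptor themselves and passing it to the two-argument overload that survives; the test changes below all follow this shape. A hedged sketch of the migration (all variables assumed to be in scope):

// Before (removed overload):
//   region = testUtil.createTestRegion(tableName, cfName, comprAlgo, bloomType,
//       maxVersions, blockSize, encoding, encodeOnDisk);

// After: construct the descriptor explicitly and use the surviving overload.
HRegion region = testUtil.createTestRegion(tableName,
    new HColumnDescriptor(Bytes.toBytes(cfName))
        .setCompressionType(comprAlgo)
        .setBloomFilterType(bloomType)
        .setMaxVersions(maxVersions)
        .setBlocksize(blockSize)
        .setDataBlockEncoding(encoding)
        .setEncodeOnDisk(encodeOnDisk));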
@@ -558,21 +558,18 @@ public class TestSerialization {
   protected HTableDescriptor createTableDescriptor(final String name,
       final int versions) {
     HTableDescriptor htd = new HTableDescriptor(name);
-    htd.addFamily(new HColumnDescriptor(fam1, versions,
-        HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
-    htd.addFamily(new HColumnDescriptor(fam2, versions,
-        HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
-    htd.addFamily(new HColumnDescriptor(fam3, versions,
-        HColumnDescriptor.DEFAULT_COMPRESSION, false, false,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
+    htd.addFamily(new HColumnDescriptor(fam1)
+        .setMaxVersions(versions)
+        .setBlockCacheEnabled(false)
+    );
+    htd.addFamily(new HColumnDescriptor(fam2)
+        .setMaxVersions(versions)
+        .setBlockCacheEnabled(false)
+    );
+    htd.addFamily(new HColumnDescriptor(fam3)
+        .setMaxVersions(versions)
+        .setBlockCacheEnabled(false)
+    );
     return htd;
   }
 
@@ -157,19 +157,8 @@ public class TestFromClientSide {
     final byte[] T1 = Bytes.toBytes("T1");
     final byte[] T2 = Bytes.toBytes("T2");
     final byte[] T3 = Bytes.toBytes("T3");
-    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY,
-        HColumnDescriptor.DEFAULT_MIN_VERSIONS,
-        HColumnDescriptor.DEFAULT_VERSIONS,
-        true,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_ENCODE_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING,
-        HColumnDescriptor.DEFAULT_IN_MEMORY,
-        HColumnDescriptor.DEFAULT_BLOCKCACHE,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE,
-        HColumnDescriptor.DEFAULT_TTL,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
+        .setKeepDeletedCells(true);
 
     HTableDescriptor desc = new HTableDescriptor(TABLENAME);
     desc.addFamily(hcd);
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -94,9 +95,12 @@ public class TestEncodedSeekers {
         new CacheConfig(testUtil.getConfiguration()).getBlockCache();
     cache.clearCache();
 
-    HRegion region = testUtil.createTestRegion(TABLE_NAME, CF_NAME,
-        Algorithm.NONE, BloomType.NONE, MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE,
-        encoding, encodeOnDisk);
+    HRegion region = testUtil.createTestRegion(
+        TABLE_NAME, new HColumnDescriptor(CF_NAME)
+            .setMaxVersions(MAX_VERSIONS)
+            .setDataBlockEncoding(encoding)
+            .setEncodeOnDisk(encodeOnDisk)
+    );
     LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(
         MIN_VALUE_SIZE, MAX_VALUE_SIZE);
 
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.client.Put;
@@ -315,10 +316,15 @@ public class TestCacheOnWrite {
     final String cf = "myCF";
     final byte[] cfBytes = Bytes.toBytes(cf);
     final int maxVersions = 3;
-    HRegion region = TEST_UTIL.createTestRegion(table, cf, compress,
-        BLOOM_TYPE, maxVersions, HFile.DEFAULT_BLOCKSIZE,
-        encoder.getEncodingInCache(),
-        encoder.getEncodingOnDisk() != DataBlockEncoding.NONE);
+    HRegion region = TEST_UTIL.createTestRegion(table,
+        new HColumnDescriptor(cf)
+            .setCompressionType(compress)
+            .setBloomFilterType(BLOOM_TYPE)
+            .setMaxVersions(maxVersions)
+            .setDataBlockEncoding(encoder.getEncodingInCache())
+            .setEncodeOnDisk(encoder.getEncodingOnDisk() !=
+                DataBlockEncoding.NONE)
+    );
     int rowIdx = 0;
     long ts = EnvironmentEdgeManager.currentTimeMillis();
     for (int iFile = 0; iFile < 5; ++iFile) {
@@ -104,12 +104,10 @@ public class TestForceCacheImportantBlocks {
 
     SchemaMetrics.setUseTableNameInTest(false);
     HColumnDescriptor hcd =
-        new HColumnDescriptor(Bytes.toBytes(CF), MAX_VERSIONS,
-            COMPRESSION_ALGORITHM.getName(),
-            HColumnDescriptor.DEFAULT_IN_MEMORY,
-            HColumnDescriptor.DEFAULT_BLOCKCACHE,
-            HColumnDescriptor.DEFAULT_TTL,
-            BLOOM_TYPE.toString());
+        new HColumnDescriptor(Bytes.toBytes(CF))
+            .setMaxVersions(MAX_VERSIONS)
+            .setCompressionType(COMPRESSION_ALGORITHM)
+            .setBloomFilterType(BLOOM_TYPE);
     hcd.setBlocksize(BLOCK_SIZE);
     hcd.setBlockCacheEnabled(cfCacheEnabled);
     HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
@@ -99,12 +99,9 @@ public class TestScannerSelectionUsingTTL {
   @Test
   public void testScannerSelection() throws IOException {
     HColumnDescriptor hcd =
-        new HColumnDescriptor(FAMILY_BYTES, Integer.MAX_VALUE,
-            Compression.Algorithm.NONE.getName(),
-            HColumnDescriptor.DEFAULT_IN_MEMORY,
-            HColumnDescriptor.DEFAULT_BLOCKCACHE,
-            TTL_SECONDS,
-            BloomType.NONE.toString());
+        new HColumnDescriptor(FAMILY_BYTES)
+            .setMaxVersions(Integer.MAX_VALUE)
+            .setTimeToLive(TTL_SECONDS);
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
@@ -511,8 +511,11 @@ public class TestHFileOutputFormat {
   {
     HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
     for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
-      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey().getBytes(), 1, entry.getValue().getName(),
-          false, false, 0, "none"));
+      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+          .setMaxVersions(1)
+          .setCompressionType(entry.getValue())
+          .setBlockCacheEnabled(false)
+          .setTimeToLive(0));
     }
     Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
   }
@@ -145,19 +145,10 @@ public class TestImportExport {
   public void testWithDeletes() throws Exception {
     String EXPORT_TABLE = "exportWithDeletes";
     HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE);
-    desc.addFamily(new HColumnDescriptor(FAMILYA,
-        HColumnDescriptor.DEFAULT_MIN_VERSIONS,
-        5, /* versions */
-        true /* keep deleted cells */,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_ENCODE_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING,
-        HColumnDescriptor.DEFAULT_IN_MEMORY,
-        HColumnDescriptor.DEFAULT_BLOCKCACHE,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE,
-        HColumnDescriptor.DEFAULT_TTL,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
+    desc.addFamily(new HColumnDescriptor(FAMILYA)
+        .setMaxVersions(5)
+        .setKeepDeletedCells(true)
+    );
     UTIL.getHBaseAdmin().createTable(desc);
     HTable t = new HTable(UTIL.getConfiguration(), EXPORT_TABLE);
 
@@ -193,19 +184,10 @@ public class TestImportExport {
 
     String IMPORT_TABLE = "importWithDeletes";
     desc = new HTableDescriptor(IMPORT_TABLE);
-    desc.addFamily(new HColumnDescriptor(FAMILYA,
-        HColumnDescriptor.DEFAULT_MIN_VERSIONS,
-        5, /* versions */
-        true /* keep deleted cells */,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_ENCODE_ON_DISK,
-        HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING,
-        HColumnDescriptor.DEFAULT_IN_MEMORY,
-        HColumnDescriptor.DEFAULT_BLOCKCACHE,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE,
-        HColumnDescriptor.DEFAULT_TTL,
-        HColumnDescriptor.DEFAULT_BLOOMFILTER,
-        HConstants.REPLICATION_SCOPE_LOCAL));
+    desc.addFamily(new HColumnDescriptor(FAMILYA)
+        .setMaxVersions(5)
+        .setKeepDeletedCells(true)
+    );
     UTIL.getHBaseAdmin().createTable(desc);
     t.close();
     t = new HTable(UTIL.getConfiguration(), IMPORT_TABLE);
@@ -28,7 +28,14 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -36,7 +43,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.junit.Test;
@@ -45,7 +52,8 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestBlocksRead extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestBlocksRead.class);
-  static final String[] BLOOM_TYPE = new String[] { "ROWCOL", "ROW", "NONE" };
+  static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL,
+      BloomType.ROW, BloomType.NONE };
 
   private static BlockCache blockCache;
 
@@ -82,16 +90,10 @@ public class TestBlocksRead extends HBaseTestCase {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor familyDesc;
     for (int i = 0; i < BLOOM_TYPE.length; i++) {
-      String bloomType = BLOOM_TYPE[i];
-      familyDesc = new HColumnDescriptor(
-          Bytes.toBytes(family + "_" + bloomType),
-          HColumnDescriptor.DEFAULT_VERSIONS,
-          HColumnDescriptor.DEFAULT_COMPRESSION,
-          HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE,
-          1, // small block size deliberate; each kv on its own block
-          HColumnDescriptor.DEFAULT_TTL, BLOOM_TYPE[i],
-          HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+      BloomType bloomType = BLOOM_TYPE[i];
+      familyDesc = new HColumnDescriptor(family + "_" + bloomType)
+          .setBlocksize(1)
+          .setBloomFilterType(BLOOM_TYPE[i]);
       htd.addFamily(familyDesc);
     }
 
@@ -138,7 +140,7 @@ public class TestBlocksRead extends HBaseTestCase {
     KeyValue[] kvs = null;
 
     for (int i = 0; i < BLOOM_TYPE.length; i++) {
-      String bloomType = BLOOM_TYPE[i];
+      BloomType bloomType = BLOOM_TYPE[i];
       byte[] cf = Bytes.toBytes(family + "_" + bloomType);
       long blocksStart = getBlkAccessCount(cf);
       Get get = new Get(Bytes.toBytes(row));
@@ -55,12 +55,7 @@ public class TestColumnSeeking {
     String table = "TestDuplicateVersions";
 
     HColumnDescriptor hcd =
-        new HColumnDescriptor(familyBytes, 1000,
-            HColumnDescriptor.DEFAULT_COMPRESSION,
-            HColumnDescriptor.DEFAULT_IN_MEMORY,
-            HColumnDescriptor.DEFAULT_BLOCKCACHE,
-            HColumnDescriptor.DEFAULT_TTL,
-            HColumnDescriptor.DEFAULT_BLOOMFILTER);
+        new HColumnDescriptor(familyBytes).setMaxVersions(1000);
     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
@@ -168,9 +168,10 @@ public class TestFSErrorsExposed {
 
     HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
     HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(
-        fam, 1, HColumnDescriptor.DEFAULT_COMPRESSION,
-        false, false, HConstants.FOREVER, "NONE"));
+    desc.addFamily(new HColumnDescriptor(fam)
+        .setMaxVersions(1)
+        .setBlockCacheEnabled(false)
+    );
     admin.createTable(desc);
     // Make it fail faster.
     util.getConfiguration().setInt("hbase.client.retries.number", 1);
@@ -3033,9 +3033,9 @@ public class TestHRegion extends HBaseTestCase {
     byte [] qf1 = Bytes.toBytes("col");
     byte [] val1 = Bytes.toBytes("value1");
     // Create Table
-    HColumnDescriptor hcd = new HColumnDescriptor(fam1, Integer.MAX_VALUE,
-        HColumnDescriptor.DEFAULT_COMPRESSION, false, true,
-        HColumnDescriptor.DEFAULT_TTL, "rowcol");
+    HColumnDescriptor hcd = new HColumnDescriptor(fam1)
+        .setMaxVersions(Integer.MAX_VALUE)
+        .setBloomFilterType(BloomType.ROWCOL);
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
@@ -3089,13 +3089,9 @@ public class TestHRegion extends HBaseTestCase {
     byte [] FAMILY = Bytes.toBytes("family");
 
     //Create table
-    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY, Integer.MAX_VALUE,
-        HColumnDescriptor.DEFAULT_COMPRESSION,
-        HColumnDescriptor.DEFAULT_IN_MEMORY,
-        HColumnDescriptor.DEFAULT_BLOCKCACHE,
-        HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL,
-        "rowcol",
-        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
+        .setMaxVersions(Integer.MAX_VALUE)
+        .setBloomFilterType(BloomType.ROWCOL);
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
@@ -3138,9 +3134,9 @@ public class TestHRegion extends HBaseTestCase {
     byte [] familyName = Bytes.toBytes("familyName");
 
     // Create Table
-    HColumnDescriptor hcd = new HColumnDescriptor(familyName, Integer.MAX_VALUE,
-        HColumnDescriptor.DEFAULT_COMPRESSION, false, true,
-        HColumnDescriptor.DEFAULT_TTL, "rowcol");
+    HColumnDescriptor hcd = new HColumnDescriptor(familyName)
+        .setMaxVersions(Integer.MAX_VALUE)
+        .setBloomFilterType(BloomType.ROWCOL);
 
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
@@ -39,6 +39,7 @@ import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.MediumTests;
@@ -150,9 +151,13 @@ public class TestMultiColumnScanner {
 
   @Test
   public void testMultiColumnScanner() throws IOException {
-    HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, FAMILY, comprAlgo,
-        bloomType, MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE,
-        dataBlockEncoding, true);
+    HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME,
+        new HColumnDescriptor(FAMILY)
+            .setCompressionType(comprAlgo)
+            .setBloomFilterType(bloomType)
+            .setMaxVersions(MAX_VERSIONS)
+            .setDataBlockEncoding(dataBlockEncoding)
+    );
     List<String> rows = sequentialStrings("row", NUM_ROWS);
     List<String> qualifiers = sequentialStrings("qual", NUM_COLUMNS);
     List<KeyValue> kvs = new ArrayList<KeyValue>();
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.SmallTests;
@@ -102,10 +103,11 @@ public class TestScanWithBloomError {
 
   @Test
   public void testThreeStoreFiles() throws IOException {
-    region = TEST_UTIL.createTestRegion(TABLE_NAME, FAMILY,
-        Compression.Algorithm.GZ, bloomType,
-        TestMultiColumnScanner.MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE,
-        DataBlockEncoding.NONE, true);
+    region = TEST_UTIL.createTestRegion(TABLE_NAME,
+        new HColumnDescriptor(FAMILY)
+            .setCompressionType(Compression.Algorithm.GZ)
+            .setBloomFilterType(bloomType)
+            .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS));
     createStoreFile(new int[] {1, 2, 6});
     createStoreFile(new int[] {1, 2, 3, 7});
    createStoreFile(new int[] {1, 9});
@@ -67,11 +67,13 @@ public class TestScanner extends HBaseTestCase {
   static final HTableDescriptor TESTTABLEDESC =
       new HTableDescriptor("testscanner");
   static {
-    TESTTABLEDESC.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY,
-        10, // Ten is arbitrary number. Keep versions to help debuggging.
-        Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
-        HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
-        HConstants.REPLICATION_SCOPE_LOCAL));
+    TESTTABLEDESC.addFamily(
+        new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+            // Ten is an arbitrary number. Keep versions to help debugging.
+            .setMaxVersions(10)
+            .setBlockCacheEnabled(false)
+            .setBlocksize(8 * 1024)
+    );
   }
   /** HRegionInfo for root region */
   public static final HRegionInfo REGION_INFO =
@@ -37,6 +37,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
@@ -142,8 +143,10 @@ public class TestSeekOptimizations {
   @Test
   public void testMultipleTimestampRanges() throws IOException {
     region = TEST_UTIL.createTestRegion(TestSeekOptimizations.class.getName(),
-        FAMILY, comprAlgo, bloomType, Integer.MAX_VALUE,
-        HFile.DEFAULT_BLOCKSIZE, DataBlockEncoding.NONE, true);
+        new HColumnDescriptor(FAMILY)
+            .setCompressionType(comprAlgo)
+            .setBloomFilterType(bloomType)
+    );
 
     // Delete the given timestamp and everything before.
     final long latestDelTS = USE_MANY_STORE_FILES ? 1397 : -1;
@@ -48,21 +48,13 @@ public class TestWideScanner extends HBaseTestCase {
   static final HTableDescriptor TESTTABLEDESC =
       new HTableDescriptor("testwidescan");
   static {
-    TESTTABLEDESC.addFamily(new HColumnDescriptor(A,
-        100, // Keep versions to help debuggging.
-        Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
-        HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
-        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE));
-    TESTTABLEDESC.addFamily(new HColumnDescriptor(B,
-        100, // Keep versions to help debuggging.
-        Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
-        HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
-        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE));
-    TESTTABLEDESC.addFamily(new HColumnDescriptor(C,
-        100, // Keep versions to help debuggging.
-        Compression.Algorithm.NONE.getName(), false, true, 8 * 1024,
-        HConstants.FOREVER, StoreFile.BloomType.NONE.toString(),
-        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE));
+    for (byte[] cfName : new byte[][] { A, B, C }) {
+      TESTTABLEDESC.addFamily(new HColumnDescriptor(cfName)
+          // Keep versions to help debugging.
+          .setMaxVersions(100)
+          .setBlocksize(8 * 1024)
+      );
+    }
   }
 
   /** HRegionInfo for root region */
@@ -87,8 +87,9 @@ public class TestThriftHBaseServiceHandler {
   private static byte[] valueBname = Bytes.toBytes("valueB");
   private static HColumnDescriptor[] families = new HColumnDescriptor[] {
       new HColumnDescriptor(familyAname),
-      new HColumnDescriptor(familyBname, 2, HColumnDescriptor.DEFAULT_COMPRESSION, HColumnDescriptor.DEFAULT_IN_MEMORY,
-          HColumnDescriptor.DEFAULT_BLOCKCACHE, HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER) };
+      new HColumnDescriptor(familyBname)
+          .setMaxVersions(2)
+  };
 
   public void assertTColumnValuesEqual(List<TColumnValue> columnValuesA, List<TColumnValue> columnValuesB) {
     assertEquals(columnValuesA.size(), columnValuesB.size());