HBASE-13464 Remove deprecations for 2.0.0 - Part 1 (Lars Francke)
Parent: 0dfb364723
Commit: 2ee5f8f5fd
@@ -85,18 +85,6 @@ public class ClusterStatus extends VersionedWritable {
   private String[] masterCoprocessors;
   private Boolean balancerOn;
 
-  /**
-   * Constructor, for Writable
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-6038">HBASE-6038</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Used by Writables and Writables are going away.
-   */
-  @Deprecated
-  public ClusterStatus() {
-    super();
-  }
-
   public ClusterStatus(final String hbaseVersion, final String clusterid,
       final Map<ServerName, ServerLoad> servers,
       final Collection<ServerName> deadServers,
@@ -220,20 +208,6 @@ public class ClusterStatus extends VersionedWritable {
   // Getters
   //
 
-  /**
-   * Returns detailed region server information: A list of
-   * {@link ServerName}.
-   * @return region server information
-   * @deprecated As of release 0.92
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-1502">HBASE-1502</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #getServers()}.
-   */
-  @Deprecated
-  public Collection<ServerName> getServerInfo() {
-    return getServers();
-  }
-
   public Collection<ServerName> getServers() {
     if (liveServers == null) {
       return Collections.<ServerName>emptyList();
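Callers still on the removed ClusterStatus.getServerInfo() move to the getServers() accessor that remains; a minimal migration sketch (the status variable is illustrative):

  Collection<ServerName> servers = status.getServers();  // replaces status.getServerInfo()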
@@ -285,16 +285,9 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   private int cachedMaxVersions = UNINITIALIZED;
 
   /**
-   * Default constructor.
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
-   *             This will be made private in HBase 2.0.0.
-   *             Used by Writables and Writables are going away.
+   * Default constructor. Must be present for PB deserializations.
    */
-  @Deprecated
-  // Make this private rather than remove after deprecation period elapses. Its needed by pb
-  // deserializations.
-  public HColumnDescriptor() {
+  private HColumnDescriptor() {
     this.name = null;
   }
 
@@ -317,10 +310,20 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * letter -- and may not contain a <code>:</code>
    */
   public HColumnDescriptor(final byte [] familyName) {
-    this (familyName == null || familyName.length <= 0?
-      HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
-      DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
-      DEFAULT_TTL, DEFAULT_BLOOMFILTER);
+    isLegalFamilyName(familyName);
+    this.name = familyName;
+    setMaxVersions(DEFAULT_VERSIONS);
+    setMinVersions(DEFAULT_MIN_VERSIONS);
+    setKeepDeletedCells(DEFAULT_KEEP_DELETED);
+    setInMemory(DEFAULT_IN_MEMORY);
+    setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
+    setTimeToLive(DEFAULT_TTL);
+    setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase()));
+    setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase()));
+    setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase()));
+    setBlocksize(DEFAULT_BLOCKSIZE);
+    setScope(DEFAULT_REPLICATION_SCOPE);
   }
 
   /**
@@ -342,148 +345,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     setMaxVersions(desc.getMaxVersions());
   }
 
-  /**
-   * Constructor
-   * @param familyName Column family name. Must be 'printable' -- digit or
-   * letter -- and may not contain a <code>:</code>
-   * @param maxVersions Maximum number of versions to keep
-   * @param compression Compression type
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param blockCacheEnabled If true, MapFile blocks should be cached
-   * @param timeToLive Time-to-live of cell contents, in seconds
-   * (use HConstants.FOREVER for unlimited TTL)
-   * @param bloomFilter Bloom filter type for this column
-   *
-   * @throws IllegalArgumentException if passed a family name that is made of
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
-   * a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is <= 0
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #HColumnDescriptor(String)} and setters.
-   */
-  @Deprecated
-  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
-      final String compression, final boolean inMemory,
-      final boolean blockCacheEnabled,
-      final int timeToLive, final String bloomFilter) {
-    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
-      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
-  }
-
-  /**
-   * Constructor
-   * @param familyName Column family name. Must be 'printable' -- digit or
-   * letter -- and may not contain a <code>:</code>
-   * @param maxVersions Maximum number of versions to keep
-   * @param compression Compression type
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param blockCacheEnabled If true, MapFile blocks should be cached
-   * @param blocksize Block size to use when writing out storefiles. Use
-   * smaller block sizes for faster random-access at expense of larger indices
-   * (more memory consumption). Default is usually 64k.
-   * @param timeToLive Time-to-live of cell contents, in seconds
-   * (use HConstants.FOREVER for unlimited TTL)
-   * @param bloomFilter Bloom filter type for this column
-   * @param scope The scope tag for this column
-   *
-   * @throws IllegalArgumentException if passed a family name that is made of
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
-   * a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is <= 0
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #HColumnDescriptor(String)} and setters.
-   */
-  @Deprecated
-  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
-      final String compression, final boolean inMemory,
-      final boolean blockCacheEnabled, final int blocksize,
-      final int timeToLive, final String bloomFilter, final int scope) {
-    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
-        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
-        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
-        scope);
-  }
-
-  /**
-   * Constructor
-   * @param familyName Column family name. Must be 'printable' -- digit or
-   * letter -- and may not contain a <code>:</code>
-   * @param minVersions Minimum number of versions to keep
-   * @param maxVersions Maximum number of versions to keep
-   * @param keepDeletedCells Whether to retain deleted cells until they expire
-   * up to maxVersions versions.
-   * @param compression Compression type
-   * @param encodeOnDisk whether to use the specified data block encoding
-   * on disk. If false, the encoding will be used in cache only.
-   * @param dataBlockEncoding data block encoding
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param blockCacheEnabled If true, MapFile blocks should be cached
-   * @param blocksize Block size to use when writing out storefiles. Use
-   * smaller blocksizes for faster random-access at expense of larger indices
-   * (more memory consumption). Default is usually 64k.
-   * @param timeToLive Time-to-live of cell contents, in seconds
-   * (use HConstants.FOREVER for unlimited TTL)
-   * @param bloomFilter Bloom filter type for this column
-   * @param scope The scope tag for this column
-   *
-   * @throws IllegalArgumentException if passed a family name that is made of
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
-   * a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is <= 0
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #HColumnDescriptor(String)} and setters.
-   */
-  @Deprecated
-  public HColumnDescriptor(final byte[] familyName, final int minVersions,
-      final int maxVersions, final KeepDeletedCells keepDeletedCells,
-      final String compression, final boolean encodeOnDisk,
-      final String dataBlockEncoding, final boolean inMemory,
-      final boolean blockCacheEnabled, final int blocksize,
-      final int timeToLive, final String bloomFilter, final int scope) {
-    isLegalFamilyName(familyName);
-    this.name = familyName;
-
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
-      // Until there is support, consider 0 or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions must be positive");
-    }
-
-    if (minVersions > 0) {
-      if (timeToLive == HConstants.FOREVER) {
-        throw new IllegalArgumentException("Minimum versions requires TTL.");
-      }
-      if (minVersions >= maxVersions) {
-        throw new IllegalArgumentException("Minimum versions must be < "
-            + "maximum versions.");
-      }
-    }
-
-    setMaxVersions(maxVersions);
-    setMinVersions(minVersions);
-    setKeepDeletedCells(keepDeletedCells);
-    setInMemory(inMemory);
-    setBlockCacheEnabled(blockCacheEnabled);
-    setTimeToLive(timeToLive);
-    setCompressionType(Compression.Algorithm.
-        valueOf(compression.toUpperCase()));
-    setDataBlockEncoding(DataBlockEncoding.
-        valueOf(dataBlockEncoding.toUpperCase()));
-    setBloomFilterType(BloomType.
-        valueOf(bloomFilter.toUpperCase()));
-    setBlocksize(blocksize);
-    setScope(scope);
-  }
-
   /**
    * @param b Family name.
    * @return <code>b</code>
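With the multi-argument constructors gone, column families are built from the single-argument constructor plus setters, the same pattern the test update later in this commit adopts; a minimal sketch (family name and values are illustrative):

  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
  hcd.setMaxVersions(5);
  hcd.setTimeToLive(HConstants.FOREVER);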
@@ -683,30 +544,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(COMPRESSION, type.getName().toUpperCase());
   }
 
-  /**
-   * @return data block encoding algorithm used on disk
-   * @deprecated As of release 0.98
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9870">HBASE-9870</a>).
-   *             This will be removed in HBase 2.0.0. See {@link #getDataBlockEncoding()}}
-   */
-  @Deprecated
-  public DataBlockEncoding getDataBlockEncodingOnDisk() {
-    return getDataBlockEncoding();
-  }
-
-  /**
-   * This method does nothing now. Flag ENCODE_ON_DISK is not used
-   * any more. Data blocks have the same encoding in cache as on disk.
-   * @return this (for chained invocation)
-   * @deprecated As of release 0.98
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9870">HBASE-9870</a>).
-   *             This will be removed in HBase 2.0.0. This method does nothing now.
-   */
-  @Deprecated
-  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
-    return this;
-  }
-
   /**
    * @return the data block encoding algorithm used in block cache and
    * optionally on disk
@@ -745,23 +582,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
   }
 
-  /**
-   * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
-   * DataBlockEncoding is been used, this is having no effect.
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0. Use {@link #isCompressTags()} instead.
-   */
-  @Deprecated
-  public boolean shouldCompressTags() {
-    String compressTagsStr = getValue(COMPRESS_TAGS);
-    boolean compressTags = DEFAULT_COMPRESS_TAGS;
-    if (compressTagsStr != null) {
-      compressTags = Boolean.parseBoolean(compressTagsStr);
-    }
-    return compressTags;
-  }
-
   /**
    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
    * DataBlockEncoding is been used, this is having no effect.
@@ -825,21 +645,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return DEFAULT_KEEP_DELETED;
   }
 
-  /**
-   * @param keepDeletedCells True if deleted rows should not be collected
-   * immediately.
-   * @return this (for chained invocation)
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-12363">HBASE-12363</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #setKeepDeletedCells(KeepDeletedCells)}.
-   */
-  @Deprecated
-  public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
-    return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
-        : KeepDeletedCells.FALSE).toString());
-  }
-
   /**
    * @param keepDeletedCells True if deleted rows should not be collected
    * immediately.
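With the boolean overload removed, callers pass the KeepDeletedCells enum directly, matching the test updates later in this commit; sketch (FAMILY is illustrative):

  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
      .setKeepDeletedCells(KeepDeletedCells.TRUE)
      .setMaxVersions(3);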
@@ -941,17 +746,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
   }
 
-  /**
-   * @return true if we should cache data blocks on write
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0. Use {@link #isCacheDataOnWrite()}} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheDataOnWrite() {
-    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
-  }
-
   /**
    * @return true if we should cache data blocks on write
    */
@@ -967,18 +761,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
   }
 
-  /**
-   * @return true if we should cache data blocks in the L1 cache (if block cache deploy
-   * has more than one tier; e.g. we are using CombinedBlockCache).
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0. Use {@link #isCacheDataInL1()}} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheDataInL1() {
-    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
-  }
-
   /**
    * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
    * than one tier; e.g. we are using CombinedBlockCache).
@@ -1004,18 +786,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return defaultSetting;
   }
 
-  /**
-   * @return true if we should cache index blocks on write
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #isCacheIndexesOnWrite()} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheIndexesOnWrite() {
-    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
-  }
-
   /**
    * @return true if we should cache index blocks on write
    */
@@ -1031,18 +801,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
   }
 
-  /**
-   * @return true if we should cache bloomfilter blocks on write
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #isCacheBloomsOnWrite()}} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheBloomsOnWrite() {
-    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
-  }
-
   /**
    * @return true if we should cache bloomfilter blocks on write
    */
@@ -1058,19 +816,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
   }
 
-  /**
-   * @return true if we should evict cached blocks from the blockcache on
-   * close
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #isEvictBlocksOnClose()}} instead.
-   */
-  @Deprecated
-  public boolean shouldEvictBlocksOnClose() {
-    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
-  }
-
   /**
    * @return true if we should evict cached blocks from the blockcache on close
    */
@@ -1087,18 +832,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
   }
 
-  /**
-   * @return true if we should prefetch blocks into the blockcache on open
-   * @deprecated As of release 1.0.0
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #isPrefetchBlocksOnOpen()}}} instead.
-   */
-  @Deprecated
-  public boolean shouldPrefetchBlocksOnOpen() {
-    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
-  }
-
   /**
    * @return true if we should prefetch blocks into the blockcache on open
    */
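Each removed should*() accessor names its is*() replacement in its @deprecated note, so migration is a rename; sketch (the hcd variable is illustrative):

  boolean cacheOnWrite = hcd.isCacheDataOnWrite();    // was shouldCacheDataOnWrite()
  boolean evictOnClose = hcd.isEvictBlocksOnClose();  // was shouldEvictBlocksOnClose()
  boolean compressTags = hcd.isCompressTags();        // was shouldCompressTags()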
@@ -233,17 +233,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     setHashCode();
   }
 
-  /** Default constructor - creates empty object
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Used by Writables and Writables are going away.
-   */
-  @Deprecated
-  public HRegionInfo() {
-    super();
-  }
-
   public HRegionInfo(final TableName tableName) {
     this(tableName, null, null);
   }
@@ -487,14 +476,10 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
 
   /**
    * Gets the table name from the specified region name.
-   * @param regionName
-   * @return Table name.
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0. Use {@link #getTable(byte[])}.
+   * @param regionName to extract the table name from
+   * @return Table name
    */
-  @Deprecated
-  public static byte [] getTableName(byte[] regionName) {
+  public static TableName getTable(final byte [] regionName) {
     int offset = -1;
     for (int i = 0; i < regionName.length; i++) {
       if (regionName[i] == HConstants.DELIMITER) {
@@ -504,19 +489,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     }
     byte[] buff = new byte[offset];
     System.arraycopy(regionName, 0, buff, 0, offset);
-    return buff;
-  }
-
-
-  /**
-   * Gets the table name from the specified region name.
-   * Like {@link #getTableName(byte[])} only returns a {@link TableName} rather than a byte array.
-   * @param regionName
-   * @return Table name
-   * @see #getTableName(byte[])
-   */
-  public static TableName getTable(final byte [] regionName) {
-    return TableName.valueOf(getTableName(regionName));
+    return TableName.valueOf(buff);
   }
 
   /**
@@ -655,26 +628,13 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     return endKey;
   }
 
-  /**
-   * Get current table name of the region
-   * @return byte array of table name
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0. Use {@link #getTable()}.
-   */
-  @Deprecated
-  public byte [] getTableName() {
-    return getTable().toBytes();
-  }
-
   /**
    * Get current table name of the region
    * @return TableName
-   * @see #getTableName()
    */
   public TableName getTable() {
     // This method name should be getTableName but there was already a method getTableName
-    // that returned a byte array. It is unfortunate given everwhere else, getTableName returns
+    // that returned a byte array. It is unfortunate given everywhere else, getTableName returns
     // a TableName instance.
     if (tableName == null || tableName.getName().length == 0) {
       tableName = getTable(getRegionName());
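Code that needs the table name as bytes now goes through getTable(), exactly as the ProtobufUtil and coprocessor-test call sites below are updated; sketch (the info variable is illustrative):

  TableName table = info.getTable();
  byte[] tableNameBytes = info.getTable().toBytes();  // replaces info.getTableName()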
@@ -1052,7 +1012,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
   }
 
   /**
    * Get the end key for display. Optionally hide the real end key.
    * @param hri
    * @param conf
    * @return the endkey
@@ -1064,7 +1024,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
   }
 
   /**
    * Get the start key for display. Optionally hide the real start key.
    * @param hri
    * @param conf
    * @return the startkey
@@ -1123,7 +1083,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
       modifiedName[lengthSoFar - 1] = ENC_SEPARATOR;
       System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar,
         encodedRegionName.length);
       lengthSoFar += encodedRegionName.length;
       modifiedName[lengthSoFar] = ENC_SEPARATOR;
       return modifiedName;
     } catch (IOException e) {
@@ -294,13 +294,12 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
   /**
    * Default constructor which constructs an empty object.
    * For deserializing an HTableDescriptor instance only.
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
-   *             This will be removed in HBase 2.0.0.
+   * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
+   *             This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
    *             Used by Writables and Writables are going away.
    */
   @Deprecated
-  public HTableDescriptor() {
+  protected HTableDescriptor() {
     super();
   }
 
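With the no-argument constructor now protected, table descriptors are created against an explicit TableName, the pattern the test updates below switch to; sketch (table and family names are illustrative):

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("my_table"));
  htd.addFamily(new HColumnDescriptor(Bytes.toBytes("cf")));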
@@ -163,17 +163,6 @@ public class ClientScanner extends AbstractClientScanner {
     return this.connection;
   }
 
-  /**
-   * @return Table name
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0. Use {@link #getTable()}.
-   */
-  @Deprecated
-  protected byte [] getTableName() {
-    return this.tableName.getName();
-  }
-
   protected TableName getTable() {
     return this.tableName;
   }
@@ -341,7 +330,7 @@ public class ClientScanner extends AbstractClientScanner {
    *
    * By default, scan metrics are disabled; if the application wants to collect them, this
    * behavior can be turned on by calling calling {@link Scan#setScanMetricsEnabled(boolean)}
    *
    * <p>This invocation clears the scan metrics. Metrics are aggregated in the Scan instance.
    */
   protected void writeScanMetrics() {
@@ -972,21 +972,6 @@ public class HTable implements HTableInterface {
     return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL);
   }
 
-  /**
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}.
-   */
-  @Deprecated
-  @Override
-  public long incrementColumnValue(final byte [] row, final byte [] family,
-      final byte [] qualifier, final long amount, final boolean writeToWAL)
-  throws IOException {
-    return incrementColumnValue(row, family, qualifier, amount,
-      writeToWAL? Durability.SKIP_WAL: Durability.USE_DEFAULT);
-  }
-
   /**
    * {@inheritDoc}
    */
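The writeToWAL boolean variant is gone; callers state the durability explicitly via the overload that remains. The four-argument form kept above delegates with Durability.SYNC_WAL, so a reasonable migration sketch is (row, family and qualifier are illustrative):

  long value = table.incrementColumnValue(row, family, qualifier, 1L, Durability.SYNC_WAL);
  // or Durability.SKIP_WAL when the old call deliberately skipped the WAL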
@@ -45,17 +45,6 @@ public interface HTableInterface extends Table {
   @Deprecated
   byte[] getTableName();
 
-  /**
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}.
-   */
-  @Deprecated
-  long incrementColumnValue(final byte [] row, final byte [] family,
-      final byte [] qualifier, final long amount, final boolean writeToWAL)
-  throws IOException;
-
   /**
    * @deprecated Use {@link #existsAll(java.util.List)} instead.
    */
@@ -121,7 +110,7 @@ public interface HTableInterface extends Table {
    */
   @Deprecated
   void setAutoFlushTo(boolean autoFlush);
 
   /**
    * Tells whether or not 'auto-flush' is turned on.
    *
@@ -30,7 +30,12 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class UnmodifyableHTableDescriptor extends HTableDescriptor {
-  /** Default constructor */
+  /**
+   * Default constructor.
+   * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0.
+   *             Use {@link #UnmodifyableHTableDescriptor(HTableDescriptor)}.
+   */
+  @Deprecated
   public UnmodifyableHTableDescriptor() {
     super();
   }
@@ -2589,7 +2589,7 @@ public final class ProtobufUtil {
     // input / output paths are relative to the store dir
     // store dir is relative to region dir
     CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
-        .setTableName(ByteStringer.wrap(info.getTableName()))
+        .setTableName(ByteStringer.wrap(info.getTable().toBytes()))
         .setEncodedRegionName(ByteStringer.wrap(info.getEncodedNameAsBytes()))
         .setFamilyName(ByteStringer.wrap(family))
         .setStoreHomeDir(storeDir.getName()); //make relative
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
@@ -669,19 +668,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     return null;
   }
 
-  /**
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link #tryAtomicRegionLoad(Connection, TableName, byte[], Collection)}.
-   */
-  @Deprecated
-  protected List<LoadQueueItem> tryAtomicRegionLoad(final HConnection conn,
-      final byte [] tableName, final byte[] first, Collection<LoadQueueItem> lqis)
-  throws IOException {
-    return tryAtomicRegionLoad(conn, TableName.valueOf(tableName), first, lqis);
-  }
-
   /**
    * Attempts to do an atomic load of many hfiles into a region. If it fails,
    * it returns a list of hfiles that need to be retried. If it is successful
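Callers of the removed HConnection/byte[] overload switch to the Connection/TableName variant referenced in its @deprecated note; sketch (conn, first and lqis are illustrative):

  List<LoadQueueItem> failed =
      tryAtomicRegionLoad(conn, TableName.valueOf(tableName), first, lqis);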
@@ -46,7 +46,7 @@ implements Writable, Comparable<TableSplit> {
   /** @deprecated LOG variable would be made private. */
   @Deprecated
   public static final Log LOG = LogFactory.getLog(TableSplit.class);
 
   // should be < 0 (@see #readFields(DataInput))
   // version 1 supports Scan data member
   enum Version {
@@ -77,7 +77,7 @@ implements Writable, Comparable<TableSplit> {
       return byCode[code * -1];
     }
   }
 
   private static final Version VERSION = Version.INITIAL;
   private TableName tableName;
   private byte [] startRow;
@@ -92,18 +92,6 @@ implements Writable, Comparable<TableSplit> {
       HConstants.EMPTY_BYTE_ARRAY, "");
   }
 
-  /**
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link TableSplit#TableSplit(TableName, byte[], byte[], String)}.
-   */
-  @Deprecated
-  public TableSplit(final byte [] tableName, Scan scan, byte [] startRow, byte [] endRow,
-      final String location) {
-    this(TableName.valueOf(tableName), scan, startRow, endRow, location);
-  }
-
   /**
    * Creates a new instance while assigning all variables.
    * Length of region is set to 0
@@ -143,18 +131,6 @@ implements Writable, Comparable<TableSplit> {
     this.length = length;
   }
 
-  /**
-   * @deprecated As of release 0.96
-   *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   *             This will be removed in HBase 2.0.0.
-   *             Use {@link TableSplit#TableSplit(TableName, byte[], byte[], String)}.
-   */
-  @Deprecated
-  public TableSplit(final byte [] tableName, byte[] startRow, byte[] endRow,
-      final String location) {
-    this(TableName.valueOf(tableName), startRow, endRow, location);
-  }
-
   /**
    * Creates a new instance without a scanner.
    *
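Both removed byte[]-table constructors were thin wrappers, so migration is a direct substitution to the TableName-based constructors their bodies already delegated to; sketch (variables are illustrative):

  TableSplit split = new TableSplit(TableName.valueOf(tableName), startRow, endRow, location);
  TableSplit splitWithScan =
      new TableSplit(TableName.valueOf(tableName), scan, startRow, endRow, location);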
@@ -120,10 +120,8 @@ public class TestHColumnDescriptorDefaultVersions {
     Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with one family
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
-    HColumnDescriptor hcd =
-        new HColumnDescriptor(FAMILY, 5, HColumnDescriptor.DEFAULT_COMPRESSION,
-            HColumnDescriptor.DEFAULT_IN_MEMORY, HColumnDescriptor.DEFAULT_BLOCKCACHE,
-            HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER);
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+    hcd.setMaxVersions(5);
     baseHtd.addFamily(hcd);
     admin.createTable(baseHtd);
     admin.disableTable(TABLE_NAME);
@@ -161,4 +159,4 @@ public class TestHColumnDescriptorDefaultVersions {
     }
   }
 
 }
@@ -1272,16 +1272,6 @@ public class TestAdmin1 {
     }
   }
 
-  /**
-   * HADOOP-2156
-   * @throws IOException
-   */
-  @SuppressWarnings("deprecation")
-  @Test (expected=IllegalArgumentException.class, timeout=300000)
-  public void testEmptyHTableDescriptor() throws IOException {
-    this.admin.createTable(new HTableDescriptor());
-  }
-
   @Test (expected=IllegalArgumentException.class, timeout=300000)
   public void testInvalidHColumnDescriptor() throws IOException {
     new HColumnDescriptor("/cfamily/name");
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionLocations;
@@ -182,7 +183,8 @@ public class TestFromClientSide {
     final byte[] T2 = Bytes.toBytes("T2");
     final byte[] T3 = Bytes.toBytes("T3");
     HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
-        .setKeepDeletedCells(true).setMaxVersions(3);
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
+        .setMaxVersions(3);
 
     HTableDescriptor desc = new HTableDescriptor(TABLENAME);
     desc.addFamily(hcd);
@@ -109,7 +109,7 @@ implements WALObserver {
       HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
     boolean bypass = false;
     // check table name matches or not.
-    if (!Bytes.equals(info.getTableName(), this.tableName)) {
+    if (!Bytes.equals(info.getTable().toBytes(), this.tableName)) {
       return bypass;
     }
     preWALWriteCalled = true;
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
@@ -281,7 +282,7 @@ public class TestImportExport {
     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);
     Table t = UTIL.getConnection().getTable(desc.getTableName());
@@ -312,7 +313,7 @@ public class TestImportExport {
     desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);
     t.close();
@@ -347,7 +348,7 @@ public class TestImportExport {
     HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE);
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);
 
@@ -383,7 +384,7 @@ public class TestImportExport {
     desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);
 
@@ -1259,7 +1259,7 @@ public class TestDistributedLogSplitting {
     byte[] family = Bytes.toBytes("family");
     byte[] qualifier = Bytes.toBytes("c1");
     long timeStamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(family));
     final WAL wal = hrs.getWAL(curRegionInfo);
     for (int i = 0; i < NUM_LOG_LINES; i += 1) {
@@ -150,7 +150,7 @@ public class TestRegionSplitPolicy {
    */
   @Test
   public void testCustomPolicy() throws IOException {
-    HTableDescriptor myHtd = new HTableDescriptor();
+    HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf("foobar"));
     myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
         KeyPrefixRegionSplitPolicy.class.getName());
     myHtd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2));
@@ -259,7 +259,7 @@ public class TestRegionSplitPolicy {
 
   @Test
   public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException {
-    HTableDescriptor myHtd = new HTableDescriptor();
+    HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf("foobar"));
     myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
         DelimitedKeyPrefixRegionSplitPolicy.class.getName());
     myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",");
@@ -98,7 +98,7 @@ public class TestWALActionsListener {
     KeyValue kv = new KeyValue(b,b,b);
     WALEdit edit = new WALEdit();
     edit.add(kv);
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(SOME_BYTES));
     htd.addFamily(new HColumnDescriptor(b));
 
     final long txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(),
@@ -202,7 +202,7 @@ public class TestReplicationSourceManager {
     final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes());
     final AtomicLong sequenceId = new AtomicLong(1);
     manager.init();
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tableame"));
     htd.addFamily(new HColumnDescriptor(f1));
     // Testing normal log rolling every 20
     for(long i = 1; i < 101; i++) {
@@ -245,7 +245,7 @@ public class TestWALFactory {
     try {
       HRegionInfo info = new HRegionInfo(tableName,
         null,null, false);
-      HTableDescriptor htd = new HTableDescriptor();
+      HTableDescriptor htd = new HTableDescriptor(tableName);
       htd.addFamily(new HColumnDescriptor(tableName.getName()));
       final WAL wal = wals.getWAL(info.getEncodedNameAsBytes());
 
@@ -366,7 +366,7 @@ public class TestWALFactory {
     final AtomicLong sequenceId = new AtomicLong(1);
     final int total = 20;
 
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(tableName.getName()));
 
     for (int i = 0; i < total; i++) {
@@ -599,7 +599,7 @@ public class TestWALFactory {
     final DumbWALActionsListener visitor = new DumbWALActionsListener();
     final AtomicLong sequenceId = new AtomicLong(1);
     long timestamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor("column"));
 
     HRegionInfo hri = new HRegionInfo(tableName,