HBASE-13464 Remove deprecations for 2.0.0 - Part 1 (Lars Francke)
This commit is contained in:
parent 0dfb364723
commit 2ee5f8f5fd
@@ -85,18 +85,6 @@ public class ClusterStatus extends VersionedWritable {
   private String[] masterCoprocessors;
   private Boolean balancerOn;

-  /**
-   * Constructor, for Writable
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-6038">HBASE-6038</a>).
-   * This will be removed in HBase 2.0.0.
-   * Used by Writables and Writables are going away.
-   */
-  @Deprecated
-  public ClusterStatus() {
-    super();
-  }
-
   public ClusterStatus(final String hbaseVersion, final String clusterid,
       final Map<ServerName, ServerLoad> servers,
       final Collection<ServerName> deadServers,
@@ -220,20 +208,6 @@ public class ClusterStatus extends VersionedWritable {
   // Getters
   //

-  /**
-   * Returns detailed region server information: A list of
-   * {@link ServerName}.
-   * @return region server information
-   * @deprecated As of release 0.92
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-1502">HBASE-1502</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #getServers()}.
-   */
-  @Deprecated
-  public Collection<ServerName> getServerInfo() {
-    return getServers();
-  }
-
   public Collection<ServerName> getServers() {
     if (liveServers == null) {
       return Collections.<ServerName>emptyList();
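Migration sketch (illustrative, not part of the patch): the removed getServerInfo() simply delegated to getServers(), so callers only swap the method name; the helper below is hypothetical.

import java.util.Collection;

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.ServerName;

public class ClusterStatusCallers {
  // Before 2.0.0: status.getServerInfo(); getServers() returns the same live-server collection.
  static Collection<ServerName> liveServers(ClusterStatus status) {
    return status.getServers();
  }
}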
@@ -285,16 +285,9 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   private int cachedMaxVersions = UNINITIALIZED;

   /**
-   * Default constructor.
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
-   * This will be made private in HBase 2.0.0.
-   * Used by Writables and Writables are going away.
+   * Default constructor. Must be present for PB deserializations.
    */
-  @Deprecated
-  // Make this private rather than remove after deprecation period elapses. Its needed by pb
-  // deserializations.
-  public HColumnDescriptor() {
+  private HColumnDescriptor() {
     this.name = null;
   }

@@ -317,10 +310,20 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    * letter -- and may not contain a <code>:</code>
    */
   public HColumnDescriptor(final byte [] familyName) {
-    this (familyName == null || familyName.length <= 0?
-      HConstants.EMPTY_BYTE_ARRAY: familyName, DEFAULT_VERSIONS,
-      DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_BLOCKCACHE,
-      DEFAULT_TTL, DEFAULT_BLOOMFILTER);
+    isLegalFamilyName(familyName);
+    this.name = familyName;
+
+    setMaxVersions(DEFAULT_VERSIONS);
+    setMinVersions(DEFAULT_MIN_VERSIONS);
+    setKeepDeletedCells(DEFAULT_KEEP_DELETED);
+    setInMemory(DEFAULT_IN_MEMORY);
+    setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
+    setTimeToLive(DEFAULT_TTL);
+    setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase()));
+    setDataBlockEncoding(DataBlockEncoding.valueOf(DEFAULT_DATA_BLOCK_ENCODING.toUpperCase()));
+    setBloomFilterType(BloomType.valueOf(DEFAULT_BLOOMFILTER.toUpperCase()));
+    setBlocksize(DEFAULT_BLOCKSIZE);
+    setScope(DEFAULT_REPLICATION_SCOPE);
   }

   /**
@@ -342,148 +345,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     setMaxVersions(desc.getMaxVersions());
   }

-  /**
-   * Constructor
-   * @param familyName Column family name. Must be 'printable' -- digit or
-   * letter -- and may not contain a <code>:</code>
-   * @param maxVersions Maximum number of versions to keep
-   * @param compression Compression type
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param blockCacheEnabled If true, MapFile blocks should be cached
-   * @param timeToLive Time-to-live of cell contents, in seconds
-   * (use HConstants.FOREVER for unlimited TTL)
-   * @param bloomFilter Bloom filter type for this column
-   *
-   * @throws IllegalArgumentException if passed a family name that is made of
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
-   * a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is <= 0
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #HColumnDescriptor(String)} and setters.
-   */
-  @Deprecated
-  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
-      final String compression, final boolean inMemory,
-      final boolean blockCacheEnabled,
-      final int timeToLive, final String bloomFilter) {
-    this(familyName, maxVersions, compression, inMemory, blockCacheEnabled,
-      DEFAULT_BLOCKSIZE, timeToLive, bloomFilter, DEFAULT_REPLICATION_SCOPE);
-  }
-
-  /**
-   * Constructor
-   * @param familyName Column family name. Must be 'printable' -- digit or
-   * letter -- and may not contain a <code>:</code>
-   * @param maxVersions Maximum number of versions to keep
-   * @param compression Compression type
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param blockCacheEnabled If true, MapFile blocks should be cached
-   * @param blocksize Block size to use when writing out storefiles. Use
-   * smaller block sizes for faster random-access at expense of larger indices
-   * (more memory consumption). Default is usually 64k.
-   * @param timeToLive Time-to-live of cell contents, in seconds
-   * (use HConstants.FOREVER for unlimited TTL)
-   * @param bloomFilter Bloom filter type for this column
-   * @param scope The scope tag for this column
-   *
-   * @throws IllegalArgumentException if passed a family name that is made of
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
-   * a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is <= 0
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #HColumnDescriptor(String)} and setters.
-   */
-  @Deprecated
-  public HColumnDescriptor(final byte [] familyName, final int maxVersions,
-      final String compression, final boolean inMemory,
-      final boolean blockCacheEnabled, final int blocksize,
-      final int timeToLive, final String bloomFilter, final int scope) {
-    this(familyName, DEFAULT_MIN_VERSIONS, maxVersions, DEFAULT_KEEP_DELETED,
-        compression, DEFAULT_ENCODE_ON_DISK, DEFAULT_DATA_BLOCK_ENCODING,
-        inMemory, blockCacheEnabled, blocksize, timeToLive, bloomFilter,
-        scope);
-  }
-
-  /**
-   * Constructor
-   * @param familyName Column family name. Must be 'printable' -- digit or
-   * letter -- and may not contain a <code>:</code>
-   * @param minVersions Minimum number of versions to keep
-   * @param maxVersions Maximum number of versions to keep
-   * @param keepDeletedCells Whether to retain deleted cells until they expire
-   * up to maxVersions versions.
-   * @param compression Compression type
-   * @param encodeOnDisk whether to use the specified data block encoding
-   * on disk. If false, the encoding will be used in cache only.
-   * @param dataBlockEncoding data block encoding
-   * @param inMemory If true, column data should be kept in an HRegionServer's
-   * cache
-   * @param blockCacheEnabled If true, MapFile blocks should be cached
-   * @param blocksize Block size to use when writing out storefiles. Use
-   * smaller blocksizes for faster random-access at expense of larger indices
-   * (more memory consumption). Default is usually 64k.
-   * @param timeToLive Time-to-live of cell contents, in seconds
-   * (use HConstants.FOREVER for unlimited TTL)
-   * @param bloomFilter Bloom filter type for this column
-   * @param scope The scope tag for this column
-   *
-   * @throws IllegalArgumentException if passed a family name that is made of
-   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> or contains
-   * a <code>:</code>
-   * @throws IllegalArgumentException if the number of versions is <= 0
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #HColumnDescriptor(String)} and setters.
-   */
-  @Deprecated
-  public HColumnDescriptor(final byte[] familyName, final int minVersions,
-      final int maxVersions, final KeepDeletedCells keepDeletedCells,
-      final String compression, final boolean encodeOnDisk,
-      final String dataBlockEncoding, final boolean inMemory,
-      final boolean blockCacheEnabled, final int blocksize,
-      final int timeToLive, final String bloomFilter, final int scope) {
-    isLegalFamilyName(familyName);
-    this.name = familyName;
-
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
-      // Until there is support, consider 0 or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions must be positive");
-    }
-
-    if (minVersions > 0) {
-      if (timeToLive == HConstants.FOREVER) {
-        throw new IllegalArgumentException("Minimum versions requires TTL.");
-      }
-      if (minVersions >= maxVersions) {
-        throw new IllegalArgumentException("Minimum versions must be < "
-            + "maximum versions.");
-      }
-    }
-
-    setMaxVersions(maxVersions);
-    setMinVersions(minVersions);
-    setKeepDeletedCells(keepDeletedCells);
-    setInMemory(inMemory);
-    setBlockCacheEnabled(blockCacheEnabled);
-    setTimeToLive(timeToLive);
-    setCompressionType(Compression.Algorithm.
-      valueOf(compression.toUpperCase()));
-    setDataBlockEncoding(DataBlockEncoding.
-      valueOf(dataBlockEncoding.toUpperCase()));
-    setBloomFilterType(BloomType.
-      valueOf(bloomFilter.toUpperCase()));
-    setBlocksize(blocksize);
-    setScope(scope);
-  }
-
   /**
    * @param b Family name.
    * @return <code>b</code>
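Migration sketch (illustrative, not part of the patch): every removed constructor's deprecation note points to {@link #HColumnDescriptor(String)} plus setters. A hedged example follows; the family name and values are made up, and unset properties keep their defaults.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeepDeletedCells;

public class FamilyDescriptorBuilderStyle {
  // Roughly replaces new HColumnDescriptor(family, maxVersions, compression, inMemory,
  // blockCacheEnabled, timeToLive, bloomFilter).
  static HColumnDescriptor exampleFamily() {
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setMaxVersions(5);
    hcd.setInMemory(true);
    hcd.setBlockCacheEnabled(true);
    hcd.setTimeToLive(HConstants.FOREVER);
    hcd.setKeepDeletedCells(KeepDeletedCells.FALSE);
    return hcd;
  }
}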
@@ -683,30 +544,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(COMPRESSION, type.getName().toUpperCase());
   }

-  /**
-   * @return data block encoding algorithm used on disk
-   * @deprecated As of release 0.98
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9870">HBASE-9870</a>).
-   * This will be removed in HBase 2.0.0. See {@link #getDataBlockEncoding()}}
-   */
-  @Deprecated
-  public DataBlockEncoding getDataBlockEncodingOnDisk() {
-    return getDataBlockEncoding();
-  }
-
-  /**
-   * This method does nothing now. Flag ENCODE_ON_DISK is not used
-   * any more. Data blocks have the same encoding in cache as on disk.
-   * @return this (for chained invocation)
-   * @deprecated As of release 0.98
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9870">HBASE-9870</a>).
-   * This will be removed in HBase 2.0.0. This method does nothing now.
-   */
-  @Deprecated
-  public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) {
-    return this;
-  }
-
   /**
    * @return the data block encoding algorithm used in block cache and
    * optionally on disk
@@ -745,23 +582,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(COMPRESS_TAGS, String.valueOf(compressTags));
   }

-  /**
-   * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
-   * DataBlockEncoding is been used, this is having no effect.
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0. Use {@link #isCompressTags()} instead.
-   */
-  @Deprecated
-  public boolean shouldCompressTags() {
-    String compressTagsStr = getValue(COMPRESS_TAGS);
-    boolean compressTags = DEFAULT_COMPRESS_TAGS;
-    if (compressTagsStr != null) {
-      compressTags = Boolean.parseBoolean(compressTagsStr);
-    }
-    return compressTags;
-  }
-
   /**
    * @return Whether KV tags should be compressed along with DataBlockEncoding. When no
    * DataBlockEncoding is been used, this is having no effect.
@@ -825,21 +645,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return DEFAULT_KEEP_DELETED;
   }

-  /**
-   * @param keepDeletedCells True if deleted rows should not be collected
-   * immediately.
-   * @return this (for chained invocation)
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-12363">HBASE-12363</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #setKeepDeletedCells(KeepDeletedCells)}.
-   */
-  @Deprecated
-  public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) {
-    return setValue(KEEP_DELETED_CELLS, (keepDeletedCells ? KeepDeletedCells.TRUE
-        : KeepDeletedCells.FALSE).toString());
-  }
-
   /**
    * @param keepDeletedCells True if deleted rows should not be collected
    * immediately.
@@ -941,17 +746,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(REPLICATION_SCOPE, Integer.toString(scope));
   }

-  /**
-   * @return true if we should cache data blocks on write
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0. Use {@link #isCacheDataOnWrite()}} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheDataOnWrite() {
-    return setAndGetBoolean(CACHE_DATA_ON_WRITE, DEFAULT_CACHE_DATA_ON_WRITE);
-  }
-
   /**
    * @return true if we should cache data blocks on write
    */
@@ -967,18 +761,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(CACHE_DATA_ON_WRITE, Boolean.toString(value));
   }

-  /**
-   * @return true if we should cache data blocks in the L1 cache (if block cache deploy
-   * has more than one tier; e.g. we are using CombinedBlockCache).
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0. Use {@link #isCacheDataInL1()}} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheDataInL1() {
-    return setAndGetBoolean(CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_IN_L1);
-  }
-
   /**
    * @return true if we should cache data blocks in the L1 cache (if block cache deploy has more
    * than one tier; e.g. we are using CombinedBlockCache).
@@ -1004,18 +786,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return defaultSetting;
   }

-  /**
-   * @return true if we should cache index blocks on write
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #isCacheIndexesOnWrite()} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheIndexesOnWrite() {
-    return setAndGetBoolean(CACHE_INDEX_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE);
-  }
-
   /**
    * @return true if we should cache index blocks on write
    */
@@ -1031,18 +801,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(CACHE_INDEX_ON_WRITE, Boolean.toString(value));
   }

-  /**
-   * @return true if we should cache bloomfilter blocks on write
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #isCacheBloomsOnWrite()}} instead.
-   */
-  @Deprecated
-  public boolean shouldCacheBloomsOnWrite() {
-    return setAndGetBoolean(CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_BLOOMS_ON_WRITE);
-  }
-
   /**
    * @return true if we should cache bloomfilter blocks on write
    */
@@ -1058,19 +816,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(CACHE_BLOOMS_ON_WRITE, Boolean.toString(value));
   }

-  /**
-   * @return true if we should evict cached blocks from the blockcache on
-   * close
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #isEvictBlocksOnClose()}} instead.
-   */
-  @Deprecated
-  public boolean shouldEvictBlocksOnClose() {
-    return setAndGetBoolean(EVICT_BLOCKS_ON_CLOSE, DEFAULT_EVICT_BLOCKS_ON_CLOSE);
-  }
-
   /**
    * @return true if we should evict cached blocks from the blockcache on close
    */
@@ -1087,18 +832,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     return setValue(EVICT_BLOCKS_ON_CLOSE, Boolean.toString(value));
   }

-  /**
-   * @return true if we should prefetch blocks into the blockcache on open
-   * @deprecated As of release 1.0.0
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-10870">HBASE-10870</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #isPrefetchBlocksOnOpen()}}} instead.
-   */
-  @Deprecated
-  public boolean shouldPrefetchBlocksOnOpen() {
-    return setAndGetBoolean(PREFETCH_BLOCKS_ON_OPEN, DEFAULT_PREFETCH_BLOCKS_ON_OPEN);
-  }
-
   /**
    * @return true if we should prefetch blocks into the blockcache on open
    */
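Migration sketch (illustrative, not part of the patch): each removed shouldXxx() getter has the isXxx() replacement named in its deprecation note (isCompressTags(), isCacheDataOnWrite(), isCacheDataInL1(), isCacheIndexesOnWrite(), isCacheBloomsOnWrite(), isEvictBlocksOnClose(), isPrefetchBlocksOnOpen()), and getDataBlockEncodingOnDisk() collapses into getDataBlockEncoding(). For example, with a hypothetical helper:

import org.apache.hadoop.hbase.HColumnDescriptor;

public class CachingFlagsAfterRename {
  // Formerly shouldCacheDataOnWrite() && shouldPrefetchBlocksOnOpen().
  static boolean warmsCacheOnWrite(HColumnDescriptor hcd) {
    return hcd.isCacheDataOnWrite() && hcd.isPrefetchBlocksOnOpen();
  }
}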
@@ -233,17 +233,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     setHashCode();
   }

-  /** Default constructor - creates empty object
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
-   * This will be removed in HBase 2.0.0.
-   * Used by Writables and Writables are going away.
-   */
-  @Deprecated
-  public HRegionInfo() {
-    super();
-  }
-
   public HRegionInfo(final TableName tableName) {
     this(tableName, null, null);
   }
@@ -487,14 +476,10 @@ public class HRegionInfo implements Comparable<HRegionInfo> {

   /**
    * Gets the table name from the specified region name.
-   * @param regionName
-   * @return Table name.
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0. Use {@link #getTable(byte[])}.
+   * @param regionName to extract the table name from
+   * @return Table name
    */
-  @Deprecated
-  public static byte [] getTableName(byte[] regionName) {
+  public static TableName getTable(final byte [] regionName) {
     int offset = -1;
     for (int i = 0; i < regionName.length; i++) {
       if (regionName[i] == HConstants.DELIMITER) {
@@ -504,19 +489,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     }
     byte[] buff = new byte[offset];
     System.arraycopy(regionName, 0, buff, 0, offset);
-    return buff;
-  }
-
-
-  /**
-   * Gets the table name from the specified region name.
-   * Like {@link #getTableName(byte[])} only returns a {@link TableName} rather than a byte array.
-   * @param regionName
-   * @return Table name
-   * @see #getTableName(byte[])
-   */
-  public static TableName getTable(final byte [] regionName) {
-    return TableName.valueOf(getTableName(regionName));
+    return TableName.valueOf(buff);
   }

   /**
@@ -655,26 +628,13 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     return endKey;
   }

-  /**
-   * Get current table name of the region
-   * @return byte array of table name
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0. Use {@link #getTable()}.
-   */
-  @Deprecated
-  public byte [] getTableName() {
-    return getTable().toBytes();
-  }
-
   /**
    * Get current table name of the region
    * @return TableName
-   * @see #getTableName()
    */
   public TableName getTable() {
     // This method name should be getTableName but there was already a method getTableName
-    // that returned a byte array. It is unfortunate given everwhere else, getTableName returns
+    // that returned a byte array. It is unfortunate given everywhere else, getTableName returns
     // a TableName instance.
     if (tableName == null || tableName.getName().length == 0) {
       tableName = getTable(getRegionName());
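Migration sketch (illustrative, not part of the patch): getTable() and the static getTable(byte[]) replace the byte[]-returning getTableName() variants; TableName#toBytes() recovers the old byte form where it is still needed. Helper names below are hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;

public class RegionInfoTableNames {
  // Formerly info.getTableName().
  static byte[] tableNameBytes(HRegionInfo info) {
    return info.getTable().toBytes();
  }

  // Formerly HRegionInfo.getTableName(regionName), which returned a byte[].
  static TableName tableOf(byte[] regionName) {
    return HRegionInfo.getTable(regionName);
  }
}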
@@ -294,13 +294,12 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
   /**
    * Default constructor which constructs an empty object.
    * For deserializing an HTableDescriptor instance only.
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
-   * This will be removed in HBase 2.0.0.
+   * @deprecated As of release 0.96 (<a href="https://issues.apache.org/jira/browse/HBASE-5453">HBASE-5453</a>).
+   * This was made protected in 2.0.0 and will be removed in HBase 3.0.0.
    * Used by Writables and Writables are going away.
    */
   @Deprecated
-  public HTableDescriptor() {
+  protected HTableDescriptor() {
     super();
   }

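Migration sketch (illustrative, not part of the patch): with the no-argument constructor now protected, callers name the table up front, as the test fixups later in this commit do; the table and family names below are made up.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class NamedTableDescriptor {
  // Formerly: new HTableDescriptor() followed by later naming.
  static HTableDescriptor describe() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.addFamily(new HColumnDescriptor("cf"));
    return htd;
  }
}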
@@ -163,17 +163,6 @@ public class ClientScanner extends AbstractClientScanner {
     return this.connection;
   }

-  /**
-   * @return Table name
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0. Use {@link #getTable()}.
-   */
-  @Deprecated
-  protected byte [] getTableName() {
-    return this.tableName.getName();
-  }
-
   protected TableName getTable() {
     return this.tableName;
   }
@@ -972,21 +972,6 @@ public class HTable implements HTableInterface {
     return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL);
   }

-  /**
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}.
-   */
-  @Deprecated
-  @Override
-  public long incrementColumnValue(final byte [] row, final byte [] family,
-      final byte [] qualifier, final long amount, final boolean writeToWAL)
-      throws IOException {
-    return incrementColumnValue(row, family, qualifier, amount,
-      writeToWAL? Durability.SKIP_WAL: Durability.USE_DEFAULT);
-  }
-
   /**
    * {@inheritDoc}
    */
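Migration sketch (illustrative, not part of the patch): the surviving overload takes an explicit Durability instead of the writeToWAL flag, and the four-argument convenience method kept above defaults to Durability.SYNC_WAL. Row and column coordinates below are made up.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementWithDurability {
  // Formerly incrementColumnValue(row, family, qualifier, 1L, true /* writeToWAL */).
  static long bump(Table table, byte[] row) throws IOException {
    return table.incrementColumnValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q"),
        1L, Durability.SYNC_WAL);
  }
}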
@@ -45,17 +45,6 @@ public interface HTableInterface extends Table {
   @Deprecated
   byte[] getTableName();

-  /**
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}.
-   */
-  @Deprecated
-  long incrementColumnValue(final byte [] row, final byte [] family,
-      final byte [] qualifier, final long amount, final boolean writeToWAL)
-      throws IOException;
-
   /**
    * @deprecated Use {@link #existsAll(java.util.List)} instead.
    */
@@ -30,7 +30,12 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class UnmodifyableHTableDescriptor extends HTableDescriptor {
-  /** Default constructor */
+  /**
+   * Default constructor.
+   * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0.
+   * Use {@link #UnmodifyableHTableDescriptor(HTableDescriptor)}.
+   */
+  @Deprecated
   public UnmodifyableHTableDescriptor() {
     super();
   }
@@ -2589,7 +2589,7 @@ public final class ProtobufUtil {
     // input / output paths are relative to the store dir
     // store dir is relative to region dir
     CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
-        .setTableName(ByteStringer.wrap(info.getTableName()))
+        .setTableName(ByteStringer.wrap(info.getTable().toBytes()))
         .setEncodedRegionName(ByteStringer.wrap(info.getEncodedNameAsBytes()))
         .setFamilyName(ByteStringer.wrap(family))
         .setStoreHomeDir(storeDir.getName()); //make relative
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionServerCallable;
@@ -669,19 +668,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     return null;
   }

-  /**
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link #tryAtomicRegionLoad(Connection, TableName, byte[], Collection)}.
-   */
-  @Deprecated
-  protected List<LoadQueueItem> tryAtomicRegionLoad(final HConnection conn,
-      final byte [] tableName, final byte[] first, Collection<LoadQueueItem> lqis)
-      throws IOException {
-    return tryAtomicRegionLoad(conn, TableName.valueOf(tableName), first, lqis);
-  }
-
   /**
    * Attempts to do an atomic load of many hfiles into a region. If it fails,
    * it returns a list of hfiles that need to be retried. If it is successful
@@ -92,18 +92,6 @@ implements Writable, Comparable<TableSplit> {
       HConstants.EMPTY_BYTE_ARRAY, "");
   }

-  /**
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link TableSplit#TableSplit(TableName, byte[], byte[], String)}.
-   */
-  @Deprecated
-  public TableSplit(final byte [] tableName, Scan scan, byte [] startRow, byte [] endRow,
-      final String location) {
-    this(TableName.valueOf(tableName), scan, startRow, endRow, location);
-  }
-
   /**
    * Creates a new instance while assigning all variables.
    * Length of region is set to 0
@@ -143,18 +131,6 @@ implements Writable, Comparable<TableSplit> {
     this.length = length;
   }

-  /**
-   * @deprecated As of release 0.96
-   * (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
-   * This will be removed in HBase 2.0.0.
-   * Use {@link TableSplit#TableSplit(TableName, byte[], byte[], String)}.
-   */
-  @Deprecated
-  public TableSplit(final byte [] tableName, byte[] startRow, byte[] endRow,
-      final String location) {
-    this(TableName.valueOf(tableName), startRow, endRow, location);
-  }
-
   /**
    * Creates a new instance without a scanner.
    *
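Migration sketch (illustrative, not part of the patch): the removed constructors only wrapped the byte[] table name in TableName.valueOf(), so callers can do that conversion themselves. This assumes the mapreduce TableSplit shown in this hunk.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mapreduce.TableSplit;

public class TableSplitConstruction {
  // Formerly new TableSplit(tableNameBytes, startRow, endRow, location).
  static TableSplit split(byte[] tableNameBytes, byte[] startRow, byte[] endRow, String location) {
    return new TableSplit(TableName.valueOf(tableNameBytes), startRow, endRow, location);
  }
}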
@@ -120,10 +120,8 @@ public class TestHColumnDescriptorDefaultVersions {
     Admin admin = TEST_UTIL.getHBaseAdmin();
     // Create a table with one family
     HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
-    HColumnDescriptor hcd =
-        new HColumnDescriptor(FAMILY, 5, HColumnDescriptor.DEFAULT_COMPRESSION,
-            HColumnDescriptor.DEFAULT_IN_MEMORY, HColumnDescriptor.DEFAULT_BLOCKCACHE,
-            HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER);
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+    hcd.setMaxVersions(5);
     baseHtd.addFamily(hcd);
     admin.createTable(baseHtd);
     admin.disableTable(TABLE_NAME);
@@ -1272,16 +1272,6 @@ public class TestAdmin1 {
     }
   }

-  /**
-   * HADOOP-2156
-   * @throws IOException
-   */
-  @SuppressWarnings("deprecation")
-  @Test (expected=IllegalArgumentException.class, timeout=300000)
-  public void testEmptyHTableDescriptor() throws IOException {
-    this.admin.createTable(new HTableDescriptor());
-  }
-
   @Test (expected=IllegalArgumentException.class, timeout=300000)
   public void testInvalidHColumnDescriptor() throws IOException {
     new HColumnDescriptor("/cfamily/name");
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.RegionLocations;
@@ -182,7 +183,8 @@ public class TestFromClientSide {
     final byte[] T2 = Bytes.toBytes("T2");
     final byte[] T3 = Bytes.toBytes("T3");
     HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
-        .setKeepDeletedCells(true).setMaxVersions(3);
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
+        .setMaxVersions(3);

     HTableDescriptor desc = new HTableDescriptor(TABLENAME);
     desc.addFamily(hcd);
@@ -109,7 +109,7 @@ implements WALObserver {
       HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
     boolean bypass = false;
     // check table name matches or not.
-    if (!Bytes.equals(info.getTableName(), this.tableName)) {
+    if (!Bytes.equals(info.getTable().toBytes(), this.tableName)) {
       return bypass;
     }
     preWALWriteCalled = true;
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
@@ -281,7 +282,7 @@ public class TestImportExport {
     HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);
     Table t = UTIL.getConnection().getTable(desc.getTableName());
@@ -312,7 +313,7 @@ public class TestImportExport {
     desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);
     t.close();
@@ -347,7 +348,7 @@ public class TestImportExport {
     HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE);
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);

@@ -383,7 +384,7 @@ public class TestImportExport {
     desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
     desc.addFamily(new HColumnDescriptor(FAMILYA)
         .setMaxVersions(5)
-        .setKeepDeletedCells(true)
+        .setKeepDeletedCells(KeepDeletedCells.TRUE)
     );
     UTIL.getHBaseAdmin().createTable(desc);

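Migration sketch (illustrative, not part of the patch): the test fixups above and in TestFromClientSide swap setKeepDeletedCells(boolean) for the KeepDeletedCells enum cited in HBASE-12363; TRUE and FALSE mirror the old boolean and the setters still chain. The family name below is made up.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeepDeletedCells;

public class KeepDeletedCellsEnum {
  // Formerly .setKeepDeletedCells(true).
  static HColumnDescriptor keepDeleted(byte[] family) {
    return new HColumnDescriptor(family)
        .setKeepDeletedCells(KeepDeletedCells.TRUE)
        .setMaxVersions(5);
  }
}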
@@ -1259,7 +1259,7 @@ public class TestDistributedLogSplitting {
     byte[] family = Bytes.toBytes("family");
     byte[] qualifier = Bytes.toBytes("c1");
     long timeStamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(family));
     final WAL wal = hrs.getWAL(curRegionInfo);
     for (int i = 0; i < NUM_LOG_LINES; i += 1) {
@@ -150,7 +150,7 @@ public class TestRegionSplitPolicy {
    */
   @Test
   public void testCustomPolicy() throws IOException {
-    HTableDescriptor myHtd = new HTableDescriptor();
+    HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf("foobar"));
     myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
         KeyPrefixRegionSplitPolicy.class.getName());
     myHtd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2));
@@ -259,7 +259,7 @@ public class TestRegionSplitPolicy {

   @Test
   public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException {
-    HTableDescriptor myHtd = new HTableDescriptor();
+    HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf("foobar"));
     myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
         DelimitedKeyPrefixRegionSplitPolicy.class.getName());
     myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",");
@@ -98,7 +98,7 @@ public class TestWALActionsListener {
     KeyValue kv = new KeyValue(b,b,b);
     WALEdit edit = new WALEdit();
     edit.add(kv);
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(SOME_BYTES));
     htd.addFamily(new HColumnDescriptor(b));

     final long txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(),
@@ -202,7 +202,7 @@ public class TestReplicationSourceManager {
     final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes());
     final AtomicLong sequenceId = new AtomicLong(1);
     manager.init();
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("tableame"));
     htd.addFamily(new HColumnDescriptor(f1));
     // Testing normal log rolling every 20
     for(long i = 1; i < 101; i++) {
@@ -245,7 +245,7 @@ public class TestWALFactory {
     try {
       HRegionInfo info = new HRegionInfo(tableName,
                   null,null, false);
-      HTableDescriptor htd = new HTableDescriptor();
+      HTableDescriptor htd = new HTableDescriptor(tableName);
       htd.addFamily(new HColumnDescriptor(tableName.getName()));
       final WAL wal = wals.getWAL(info.getEncodedNameAsBytes());

@@ -366,7 +366,7 @@ public class TestWALFactory {
     final AtomicLong sequenceId = new AtomicLong(1);
     final int total = 20;

-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor(tableName.getName()));

     for (int i = 0; i < total; i++) {
@@ -599,7 +599,7 @@ public class TestWALFactory {
     final DumbWALActionsListener visitor = new DumbWALActionsListener();
     final AtomicLong sequenceId = new AtomicLong(1);
     long timestamp = System.currentTimeMillis();
-    HTableDescriptor htd = new HTableDescriptor();
+    HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(new HColumnDescriptor("column"));

     HRegionInfo hri = new HRegionInfo(tableName,