HBASE-18721 Cleanup unused configs and private declaration
This commit is contained in:
parent
53c9516834
commit
8a800c3f19
|
@ -58,12 +58,6 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
|
|||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Evolving
|
||||
public class BufferedMutatorImpl implements BufferedMutator {
|
||||
/**
|
||||
* Key to use setting non-default BufferedMutator implementation
|
||||
* classname via Configuration.
|
||||
*/
|
||||
public static final String HBASE_BUFFEREDMUTATOR_CLASSNAME_KEY =
|
||||
"hbase.client.bufferedmutator.classname";
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(BufferedMutatorImpl.class);
|
||||
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
/**
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.regionserver;
|
||||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
public enum RegionOpeningState {
|
||||
|
||||
OPENED,
|
||||
|
||||
ALREADY_OPENED,
|
||||
|
||||
FAILED_OPENING;
|
||||
}
|
|
@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCata
|
|||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
|
||||
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
|
||||
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@ -243,36 +242,6 @@ public final class ResponseConverter {
|
|||
return ProtobufUtil.getRegionInfos(proto);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the region opening state from a OpenRegionResponse
|
||||
*
|
||||
* @param proto the OpenRegionResponse
|
||||
* @return the region opening state
|
||||
*/
|
||||
public static RegionOpeningState getRegionOpeningState
|
||||
(final OpenRegionResponse proto) {
|
||||
if (proto == null || proto.getOpeningStateCount() != 1) return null;
|
||||
return RegionOpeningState.valueOf(
|
||||
proto.getOpeningState(0).name());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of region opening state from a OpenRegionResponse
|
||||
*
|
||||
* @param proto the OpenRegionResponse
|
||||
* @return the list of region opening state
|
||||
*/
|
||||
public static List<RegionOpeningState> getRegionOpeningStateList(
|
||||
final OpenRegionResponse proto) {
|
||||
if (proto == null) return null;
|
||||
List<RegionOpeningState> regionOpeningStates = new ArrayList<>(proto.getOpeningStateCount());
|
||||
for (int i = 0; i < proto.getOpeningStateCount(); i++) {
|
||||
regionOpeningStates.add(RegionOpeningState.valueOf(
|
||||
proto.getOpeningState(i).name()));
|
||||
}
|
||||
return regionOpeningStates;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the region is closed from a CloseRegionResponse
|
||||
*
|
||||
|
|
|
@ -572,16 +572,6 @@ public class CellComparator implements Comparator<Cell>, Serializable {
|
|||
return comparator.compareTo(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
|
||||
}
|
||||
|
||||
/**
|
||||
* Comparator that compares row component only of a Cell
|
||||
*/
|
||||
public static class RowComparator extends CellComparator {
|
||||
@Override
|
||||
public int compare(Cell a, Cell b) {
|
||||
return compareRows(a, b);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@link CellComparator} for <code>hbase:meta</code> catalog table
|
||||
* {@link KeyValue}s.
|
||||
|
|
|
@ -1551,25 +1551,6 @@ public final class CellUtil {
|
|||
};
|
||||
}
|
||||
|
||||
// Shared, stateless iterator returned for cells that carry no tags; reusing a
// single instance avoids allocating a fresh empty iterator on every call.
private static final Iterator<Tag> EMPTY_TAGS_ITR = new Iterator<Tag>() {
  @Override
  public boolean hasNext() {
    // Always empty: there is never a next tag.
    return false;
  }

  // NOTE: deliberately returns null instead of throwing NoSuchElementException,
  // which deviates from the Iterator contract; the findbugs warning is
  // suppressed as "Intentional" — callers are expected to check hasNext() first.
  @Override
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IT_NO_SUCH_ELEMENT",
    justification="Intentional")
  public Tag next() {
    return null;
  }

  @Override
  public void remove() {
    // Removal is meaningless on an empty, shared iterator.
    throw new UnsupportedOperationException();
  }
};
|
||||
|
||||
/**
|
||||
* Util method to iterate through the tags in the given cell.
|
||||
*
|
||||
|
|
|
@ -153,9 +153,6 @@ public final class HConstants {
|
|||
/** Cluster is standalone or pseudo-distributed */
|
||||
public static final boolean CLUSTER_IS_LOCAL = false;
|
||||
|
||||
/** Cluster is fully-distributed */
|
||||
public static final boolean CLUSTER_IS_DISTRIBUTED = true;
|
||||
|
||||
/** Default value for cluster distributed mode */
|
||||
public static final boolean DEFAULT_CLUSTER_DISTRIBUTED = CLUSTER_IS_LOCAL;
|
||||
|
||||
|
@ -205,15 +202,6 @@ public final class HConstants {
|
|||
/** Default client port that the zookeeper listens on */
|
||||
public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181;
|
||||
|
||||
/**
|
||||
* Parameter name for the wait time for the recoverable zookeeper
|
||||
*/
|
||||
public static final String ZOOKEEPER_RECOVERABLE_WAITTIME =
|
||||
"hbase.zookeeper.recoverable.waittime";
|
||||
|
||||
/** Default wait time for the recoverable zookeeper */
|
||||
public static final long DEFAULT_ZOOKEPER_RECOVERABLE_WAITIME = 10000;
|
||||
|
||||
/** Parameter name for the root dir in ZK for this cluster */
|
||||
public static final String ZOOKEEPER_ZNODE_PARENT = "zookeeper.znode.parent";
|
||||
|
||||
|
@ -266,9 +254,6 @@ public final class HConstants {
|
|||
/** Parameter name for what master implementation to use. */
|
||||
public static final String MASTER_IMPL= "hbase.master.impl";
|
||||
|
||||
/** Parameter name for what hbase client implementation to use. */
|
||||
public static final String HBASECLIENT_IMPL= "hbase.hbaseclient.impl";
|
||||
|
||||
/** Parameter name for how often threads should wake up */
|
||||
public static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
|
||||
|
||||
|
@ -334,13 +319,6 @@ public final class HConstants {
|
|||
/** Any artifacts left from migration can be moved here */
|
||||
public static final String MIGRATION_NAME = ".migration";
|
||||
|
||||
/**
|
||||
* The directory from which co-processor/custom filter jars can be loaded
|
||||
* dynamically by the region servers. This value can be overridden by the
|
||||
* hbase.dynamic.jars.dir config.
|
||||
*/
|
||||
public static final String LIB_DIR = "lib";
|
||||
|
||||
/** Used to construct the name of the compaction directory during compaction */
|
||||
public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
|
||||
|
||||
|
@ -620,11 +598,6 @@ public final class HConstants {
|
|||
// public static final int FOREVER = -1;
|
||||
public static final int FOREVER = Integer.MAX_VALUE;
|
||||
|
||||
/**
|
||||
* Seconds in a week
|
||||
*/
|
||||
public static final int WEEK_IN_SECONDS = 7 * 24 * 3600;
|
||||
|
||||
/**
|
||||
* Seconds in a day, hour and minute
|
||||
*/
|
||||
|
@ -654,17 +627,6 @@ public final class HConstants {
|
|||
|
||||
public static final String REGION_IMPL = "hbase.hregion.impl";
|
||||
|
||||
/** modifyTable op for replacing the table descriptor */
|
||||
@InterfaceAudience.Private
|
||||
public static enum Modify {
|
||||
CLOSE_REGION,
|
||||
TABLE_COMPACT,
|
||||
TABLE_FLUSH,
|
||||
TABLE_MAJOR_COMPACT,
|
||||
TABLE_SET_HTD,
|
||||
TABLE_SPLIT
|
||||
}
|
||||
|
||||
/**
|
||||
* Scope tag for locally scoped data.
|
||||
* This data will not be replicated.
|
||||
|
@ -953,9 +915,6 @@ public final class HConstants {
|
|||
/** Maximum time to retry for a failed bulk load request */
|
||||
public static final String BULKLOAD_MAX_RETRIES_NUMBER = "hbase.bulkload.retries.number";
|
||||
|
||||
/** HBCK special code name used as server name when manipulating ZK nodes */
|
||||
public static final String HBCK_CODE_NAME = "HBCKServerName";
|
||||
|
||||
public static final String KEY_FOR_HOSTNAME_SEEN_BY_MASTER =
|
||||
"hbase.regionserver.hostname.seen.by.master";
|
||||
|
||||
|
@ -1055,13 +1014,6 @@ public final class HConstants {
|
|||
"hbase.regionserver.replication.handler.count";
|
||||
public static final int DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT = 3;
|
||||
|
||||
public static final String MASTER_HANDLER_COUNT = "hbase.master.handler.count";
|
||||
public static final int DEFAULT_MASTER_HANLDER_COUNT = 25;
|
||||
|
||||
/** Conf key that specifies timeout value to wait for a region ready */
|
||||
public static final String LOG_REPLAY_WAIT_REGION_TIMEOUT =
|
||||
"hbase.master.log.replay.wait.region.timeout";
|
||||
|
||||
/** Conf key for enabling meta replication */
|
||||
public static final String USE_META_REPLICAS = "hbase.meta.replicas.use";
|
||||
public static final boolean DEFAULT_USE_META_REPLICAS = false;
|
||||
|
|
|
@ -743,42 +743,6 @@ public final class ByteBufferUtils {
|
|||
return l1 - l2;
|
||||
}
|
||||
|
||||
/*
|
||||
* Both values are passed as is read by Unsafe. When platform is Little Endian, have to convert
|
||||
* to corresponding Big Endian value and then do compare. We do all writes in Big Endian format.
|
||||
*/
|
||||
private static boolean lessThanUnsignedLong(long x1, long x2) {
|
||||
if (UnsafeAccess.littleEndian) {
|
||||
x1 = Long.reverseBytes(x1);
|
||||
x2 = Long.reverseBytes(x2);
|
||||
}
|
||||
return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Both values are passed as is read by Unsafe. When platform is Little Endian, have to convert
|
||||
* to corresponding Big Endian value and then do compare. We do all writes in Big Endian format.
|
||||
*/
|
||||
private static boolean lessThanUnsignedInt(int x1, int x2) {
|
||||
if (UnsafeAccess.littleEndian) {
|
||||
x1 = Integer.reverseBytes(x1);
|
||||
x2 = Integer.reverseBytes(x2);
|
||||
}
|
||||
return (x1 & 0xffffffffL) < (x2 & 0xffffffffL);
|
||||
}
|
||||
|
||||
/*
|
||||
* Both values are passed as is read by Unsafe. When platform is Little Endian, have to convert
|
||||
* to corresponding Big Endian value and then do compare. We do all writes in Big Endian format.
|
||||
*/
|
||||
private static boolean lessThanUnsignedShort(short x1, short x2) {
|
||||
if (UnsafeAccess.littleEndian) {
|
||||
x1 = Short.reverseBytes(x1);
|
||||
x2 = Short.reverseBytes(x2);
|
||||
}
|
||||
return (x1 & 0xffff) < (x2 & 0xffff);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads a short value at the given buffer's offset.
|
||||
* @param buffer
|
||||
|
|
|
@ -1509,51 +1509,6 @@ public class Bytes implements Comparable<Bytes> {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if x1 is less than x2, when both values are treated as
|
||||
* unsigned long.
|
||||
* Both values are passed as is read by Unsafe. When platform is Little Endian, have to
|
||||
* convert to corresponding Big Endian value and then do compare. We do all writes in
|
||||
* Big Endian format.
|
||||
*/
|
||||
static boolean lessThanUnsignedLong(long x1, long x2) {
|
||||
if (UnsafeAccess.littleEndian) {
|
||||
x1 = Long.reverseBytes(x1);
|
||||
x2 = Long.reverseBytes(x2);
|
||||
}
|
||||
return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if x1 is less than x2, when both values are treated as
|
||||
* unsigned int.
|
||||
* Both values are passed as is read by Unsafe. When platform is Little Endian, have to
|
||||
* convert to corresponding Big Endian value and then do compare. We do all writes in
|
||||
* Big Endian format.
|
||||
*/
|
||||
static boolean lessThanUnsignedInt(int x1, int x2) {
|
||||
if (UnsafeAccess.littleEndian) {
|
||||
x1 = Integer.reverseBytes(x1);
|
||||
x2 = Integer.reverseBytes(x2);
|
||||
}
|
||||
return (x1 & 0xffffffffL) < (x2 & 0xffffffffL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if x1 is less than x2, when both values are treated as
|
||||
* unsigned short.
|
||||
* Both values are passed as is read by Unsafe. When platform is Little Endian, have to
|
||||
* convert to corresponding Big Endian value and then do compare. We do all writes in
|
||||
* Big Endian format.
|
||||
*/
|
||||
static boolean lessThanUnsignedShort(short x1, short x2) {
|
||||
if (UnsafeAccess.littleEndian) {
|
||||
x1 = Short.reverseBytes(x1);
|
||||
x2 = Short.reverseBytes(x2);
|
||||
}
|
||||
return (x1 & 0xffff) < (x2 & 0xffff);
|
||||
}
|
||||
|
||||
/**
|
||||
* Lexicographically compare two arrays.
|
||||
*
|
||||
|
|
|
@ -47,12 +47,6 @@ public class DisableTableProcedure
|
|||
|
||||
private Boolean traceEnabled = null;
|
||||
|
||||
/** Outcome of the bulk mark-regions-offline step when disabling a table. */
enum MarkRegionOfflineOpResult {
  /** All regions were marked offline. */
  MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL,
  /** The bulk assignment of regions failed. */
  BULK_ASSIGN_REGIONS_FAILED,
  /** The operation was interrupted before it completed. */
  MARK_ALL_REGIONS_OFFLINE_INTERRUPTED;
}
|
||||
|
||||
/**
 * Nullary constructor. NOTE(review): presumably required by the procedure
 * framework to re-instantiate this procedure on replay/deserialization —
 * confirm against the ProcedureExecutor usage.
 */
public DisableTableProcedure() {
  super();
}
|
||||
|
|
|
@ -39,8 +39,6 @@ import org.apache.hadoop.hbase.quotas.QuotaExceededException;
|
|||
@InterfaceAudience.Private
|
||||
public class NamespaceAuditor {
|
||||
private static final Log LOG = LogFactory.getLog(NamespaceAuditor.class);
|
||||
static final String NS_AUDITOR_INIT_TIMEOUT = "hbase.namespace.auditor.init.timeout";
|
||||
static final int DEFAULT_NS_AUDITOR_INIT_TIMEOUT = 120000;
|
||||
private NamespaceStateManager stateManager;
|
||||
private MasterServices masterServices;
|
||||
|
||||
|
|
|
@ -33,15 +33,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
|
|||
@InterfaceAudience.Private
|
||||
public interface QuotaSnapshotStore<T> {
|
||||
|
||||
/**
|
||||
* The current state of a table with respect to the policy set forth by a quota.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public enum ViolationState {
|
||||
IN_VIOLATION,
|
||||
IN_OBSERVANCE,
|
||||
}
|
||||
|
||||
/**
|
||||
* Singleton to represent a table without a quota defined. It is never in violation.
|
||||
*/
|
||||
|
|
|
@ -204,10 +204,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY =
|
||||
"hbase.hregion.scan.loadColumnFamiliesOnDemand";
|
||||
|
||||
/** Config key for using mvcc pre-assign feature for put */
|
||||
public static final String HREGION_MVCC_PRE_ASSIGN = "hbase.hregion.mvcc.preassign";
|
||||
public static final boolean DEFAULT_HREGION_MVCC_PRE_ASSIGN = true;
|
||||
|
||||
public static final String HREGION_UNASSIGN_FOR_FNFE = "hbase.hregion.unassign.for.fnfe";
|
||||
public static final boolean DEFAULT_HREGION_UNASSIGN_FOR_FNFE = true;
|
||||
|
||||
|
|
|
@ -228,9 +228,6 @@ public class HRegionServer extends HasThread implements
|
|||
// Time to pause if master says 'please hold'. Make configurable if needed.
|
||||
private static final int INIT_PAUSE_TIME_MS = 1000;
|
||||
|
||||
public static final String REGION_LOCK_AWAIT_TIME_SEC =
|
||||
"hbase.regionserver.region.lock.await.time.sec";
|
||||
public static final int DEFAULT_REGION_LOCK_AWAIT_TIME_SEC = 300; // 5 min
|
||||
private static final Log LOG = LogFactory.getLog(HRegionServer.class);
|
||||
|
||||
/**
|
||||
|
|
|
@ -159,13 +159,6 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
|
|||
protected final long readPt;
|
||||
private boolean topChanged = false;
|
||||
|
||||
/**
 * Used by the injection framework to test the race between StoreScanner
 * construction and compaction; each constant names a point in that window.
 */
enum StoreScannerCompactionRace {
  /** Before the scanner performs its initial seek. */
  BEFORE_SEEK,
  /** After the scanner has performed its initial seek. */
  AFTER_SEEK,
  /** After the compaction has completed. */
  COMPACT_COMPLETE;
}
|
||||
|
||||
/** An internal constructor. */
|
||||
private StoreScanner(Optional<Store> store, Scan scan, ScanInfo scanInfo,
|
||||
int numColumns, long readPt, boolean cacheBlocks, ScanType scanType) {
|
||||
|
|
|
@ -70,23 +70,6 @@ public interface DeleteTracker extends ShipperListener {
|
|||
*/
|
||||
void reset();
|
||||
|
||||
/**
 * Return codes for comparison of two Deletes.
 * <p>
 * The codes tell the merging function what to do.
 * <p>
 * INCLUDE means add the specified Delete to the merged list. NEXT means move
 * to the next element in the specified list(s).
 */
enum DeleteCompare {
  /** Include the old Delete; advance only the old list. */
  INCLUDE_OLD_NEXT_OLD,
  /** Include the old Delete; advance both lists. */
  INCLUDE_OLD_NEXT_BOTH,
  /** Include the new Delete; advance only the new list. */
  INCLUDE_NEW_NEXT_NEW,
  /** Include the new Delete; advance both lists. */
  INCLUDE_NEW_NEXT_BOTH,
  /** Include nothing; advance the old list. */
  NEXT_OLD,
  /** Include nothing; advance the new list. */
  NEXT_NEW;
}
|
||||
|
||||
/**
|
||||
* Returns codes for delete result. The codes tell the ScanQueryMatcher whether the kv is deleted
|
||||
* and why. Based on the delete result, the ScanQueryMatcher will decide the next operation
|
||||
|
|
|
@ -234,7 +234,7 @@ public class MiniZooKeeperCluster {
|
|||
standaloneServerFactory = new NIOServerCnxnFactory();
|
||||
standaloneServerFactory.configure(
|
||||
new InetSocketAddress(currentClientPort),
|
||||
configuration.getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, 1000));
|
||||
configuration.getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS));
|
||||
} catch (BindException e) {
|
||||
LOG.debug("Failed binding ZK Server to client port: " +
|
||||
currentClientPort, e);
|
||||
|
|
|
@ -789,35 +789,7 @@ public class TestAssignmentManager {
|
|||
}
|
||||
|
||||
private class GoodSplitExecutor extends NoopRsExecutor {
|
||||
|
||||
/*
|
||||
@Override
|
||||
protected RegionOpeningState execOpenRegion(ServerName server, RegionOpenInfo openReq)
|
||||
throws IOException {
|
||||
sendTransitionReport(server, openReq.getRegion(), TransitionCode.OPENED);
|
||||
// Concurrency?
|
||||
// Now update the state of our cluster in regionsToRegionServers.
|
||||
SortedSet<byte []> regions = regionsToRegionServers.get(server);
|
||||
if (regions == null) {
|
||||
regions = new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
|
||||
regionsToRegionServers.put(server, regions);
|
||||
}
|
||||
HRegionInfo hri = HRegionInfo.convert(openReq.getRegion());
|
||||
if (regions.contains(hri.getRegionName())) {
|
||||
throw new UnsupportedOperationException(hri.getRegionNameAsString());
|
||||
}
|
||||
regions.add(hri.getRegionName());
|
||||
return RegionOpeningState.OPENED;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected CloseRegionResponse execCloseRegion(ServerName server, byte[] regionName)
|
||||
throws IOException {
|
||||
HRegionInfo hri = am.getRegionInfo(regionName);
|
||||
sendTransitionReport(server, HRegionInfo.convert(hri), TransitionCode.CLOSED);
|
||||
return CloseRegionResponse.newBuilder().setClosed(true).build();
|
||||
}*/
|
||||
|
||||
}
|
||||
|
||||
private void collectAssignmentManagerMetrics() {
|
||||
|
|
|
@ -153,7 +153,8 @@ public abstract class AbstractTestWALReplay {
|
|||
if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
|
||||
TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
|
||||
}
|
||||
this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
|
||||
this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,
|
||||
HConstants.DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG) ?
|
||||
RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
|
||||
this.wals = new WALFactory(conf, null, currentTest.getMethodName());
|
||||
}
|
||||
|
|
|
@ -86,7 +86,6 @@ import org.apache.hadoop.hbase.filter.SkipFilter;
|
|||
import org.apache.hadoop.hbase.filter.ValueFilter;
|
||||
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
|
||||
import org.apache.hadoop.hbase.io.WritableWithSize;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
@ -172,7 +171,8 @@ class HbaseObjectWritableFor96Migration implements Writable, WritableWithSize, C
|
|||
|
||||
// Hbase types
|
||||
addToMap(HColumnDescriptor.class, code++);
|
||||
addToMap(HConstants.Modify.class, code++);
|
||||
// HConstants.Modify no longer exists; increase code so other classes stay the same.
|
||||
code++;
|
||||
|
||||
// We used to have a class named HMsg but its been removed. Rather than
|
||||
// just axe it, use following random Integer class -- we just chose any
|
||||
|
@ -262,7 +262,8 @@ class HbaseObjectWritableFor96Migration implements Writable, WritableWithSize, C
|
|||
code++;
|
||||
//addToMap(HServerLoad.class, code++);
|
||||
|
||||
addToMap(RegionOpeningState.class, code++);
|
||||
// RegionOpeningState no longer exists; increase code so other classes stay the same.
|
||||
code++;
|
||||
|
||||
addToMap(HTableDescriptor[].class, code++);
|
||||
|
||||
|
|
|
@ -160,7 +160,6 @@ class HBaseConnectionKey(c: Configuration) extends Logging {
|
|||
HConstants.ZOOKEEPER_QUORUM,
|
||||
HConstants.ZOOKEEPER_ZNODE_PARENT,
|
||||
HConstants.ZOOKEEPER_CLIENT_PORT,
|
||||
HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME,
|
||||
HConstants.HBASE_CLIENT_PAUSE,
|
||||
HConstants.HBASE_CLIENT_RETRIES_NUMBER,
|
||||
HConstants.HBASE_RPC_TIMEOUT_KEY,
|
||||
|
|
Loading…
Reference in New Issue