HBASE-19301 Provide way for CPs to create short circuited connection with custom configurations.
parent 4df2dffbab
commit f7331f9ceb
@@ -127,6 +127,7 @@ public final class ConnectionUtils {
* localhost if the invocation target is 'this' server; save on network and protobuf
* invocations.
*/
// TODO This has to still do PB marshalling/unmarshalling stuff. Check how/whether we can avoid.
@VisibleForTesting // Class is visible so can assert we are short-circuiting when expected.
public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
private final ServerName serverName;

@@ -53,6 +53,8 @@ public interface Server extends Abortable, Stoppable {
*/
Connection getConnection();

Connection createConnection(Configuration conf) throws IOException;

/**
* Returns a reference to the servers' cluster connection. Prefer {@link #getConnection()}.
*

@@ -19,13 +19,16 @@

package org.apache.hadoop.hbase.coprocessor;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving

@@ -41,15 +44,31 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment<Mas
* struggling or it is on the other side of a network partition. Any use of Connection from
* inside a Coprocessor must be able to handle all such hiccups.
*
* <p>Using a Connection to get at a local resource -- say a Region that is on the local
* <p>Using this Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC (and
* protobuf marshalling/unmarshalling).
*
* short-circuit of the RPC framework to make a direct invocation avoiding RPC.
* <p>
* Note: If you want to create Connection with your own Configuration and NOT use the Master's
* Connection (though its cache of locations will be warm, and its life-cycle is not the concern
* of the CP), see {@link #createConnection(Configuration)}.
* @return The host's Connection to the Cluster.
*/
Connection getConnection();

/**
* Creates a cluster connection using the passed configuration.
* <p>Using this Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC.
* <p>
* Note: HBase will NOT cache/maintain this Connection. If Coprocessors need to cache and reuse
* this connection, it has to be done by Coprocessors. Also make sure to close it after use.
*
* @param conf configuration
* @return Connection created using the passed conf.
*/
Connection createConnection(Configuration conf) throws IOException;

/**
* Returns a MetricRegistry that can be used to track metrics at the master level.
*
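
The Javadoc above spells out the contract -- getConnection() returns the Master's own short-circuited Connection, while a Connection from createConnection(Configuration) is owned by the caller and must be closed -- but the patch itself carries no usage example. A minimal sketch follows; the observer class, the hook chosen, and the tuned configuration key are illustrative assumptions, not part of this commit.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer; only createConnection(Configuration) is new in this patch.
public class ExampleMasterObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    MasterCoprocessorEnvironment env = ctx.getEnvironment();
    // Start from the host's configuration, then override what this CP cares about.
    Configuration conf = new Configuration(env.getConfiguration());
    conf.setInt("hbase.client.retries.number", 3);
    // HBase does not cache this Connection, so close it (try-with-resources) when done.
    try (Connection conn = env.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      admin.tableExists(TableName.valueOf("example"));
    }
  }
}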
@@ -19,8 +19,10 @@

package org.apache.hadoop.hbase.coprocessor;

import java.io.IOException;
import java.util.concurrent.ConcurrentMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;

@@ -62,13 +64,29 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment<Reg
*
* <p>Using a Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC (and
* protobuf marshalling/unmarshalling).
*
* short-circuit of the RPC framework to make a direct invocation avoiding RPC.
*<p>
* Note: If you want to create Connection with your own Configuration and NOT use the RegionServer
* Connection (though its cache of locations will be warm, and its life-cycle is not the concern
* of the CP), see {@link #createConnection(Configuration)}.
* @return The host's Connection to the Cluster.
*/
Connection getConnection();

/**
* Creates a cluster connection using the passed configuration.
* <p>Using this Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC.
* <p>
* Note: HBase will NOT cache/maintain this Connection. If Coprocessors need to cache and reuse
* this connection, it has to be done by Coprocessors. Also make sure to close it after use.
*
* @param conf configuration
* @return Connection created using the passed conf.
*/
Connection createConnection(Configuration conf) throws IOException;

/**
* Returns a MetricRegistry that can be used to track metrics at the region server level. All
* metrics tracked at this level will be shared by all the coprocessor instances
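
Because the host will not cache a connection created this way, a coprocessor that wants to reuse one across invocations has to hold it itself and release it in stop(). A rough sketch of that lifecycle, with the class name and configuration key being illustrative assumptions:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical coprocessor that owns a long-lived Connection built from its own Configuration.
public class ExampleRegionCoprocessor implements RegionCoprocessor {

  private Connection conn; // cached by the coprocessor itself, never by HBase

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
      Configuration conf = new Configuration(env.getConfiguration());
      conf.setInt("hbase.client.operation.timeout", 10000);
      this.conn = ((RegionCoprocessorEnvironment) env).createConnection(conf);
    }
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
    if (this.conn != null) {
      this.conn.close(); // the coprocessor must close what it created
    }
  }
}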
@@ -18,6 +18,9 @@
*/
package org.apache.hadoop.hbase.coprocessor;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;

@@ -49,13 +52,29 @@ public interface RegionServerCoprocessorEnvironment
*
* <p>Using a Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC (and
* protobuf marshalling/unmarshalling).
*
* short-circuit of the RPC framework to make a direct invocation avoiding RPC.
*<p>
* Note: If you want to create Connection with your own Configuration and NOT use the RegionServer
* Connection (though its cache of locations will be warm, and its life-cycle is not the concern
* of the CP), see {@link #createConnection(Configuration)}.
* @return The host's Connection to the Cluster.
*/
Connection getConnection();

/**
* Creates a cluster connection using the passed configuration.
* <p>Using this Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC.
* <p>
* Note: HBase will NOT cache/maintain this Connection. If Coprocessors need to cache and reuse
* this connection, it has to be done by Coprocessors. Also make sure to close it after use.
*
* @param conf configuration
* @return Connection created using the passed conf.
*/
Connection createConnection(Configuration conf) throws IOException;

/**
* Returns a MetricRegistry that can be used to track metrics at the region server level.
*
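
By contrast, getConnection() hands back the RegionServer's own Connection, which the coprocessor must not close; local calls through it are short-circuited. A sketch of that pattern (the observer hook, table name, and column family are made up for illustration):

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical observer writing an audit row through the host's short-circuited Connection.
public class ExampleRegionServerObserver
    implements RegionServerCoprocessor, RegionServerObserver {

  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
      throws IOException {
    RegionServerCoprocessorEnvironment env = ctx.getEnvironment();
    // The Connection belongs to the RegionServer: use it, but do not close it here.
    try (Table table = env.getConnection().getTable(TableName.valueOf("example_audit"))) {
      Put put = new Put(Bytes.toBytes(env.getServerName().getServerName()));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("wal_roll"),
          Bytes.toBytes(System.currentTimeMillis()));
      table.put(put);
    }
  }
}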
@@ -78,16 +78,14 @@ public class MasterCoprocessorHost
*/
private static class MasterEnvironment extends BaseEnvironment<MasterCoprocessor>
implements MasterCoprocessorEnvironment {
private final Connection connection;
private final ServerName serverName;
private final boolean supportGroupCPs;
private final MetricRegistry metricRegistry;
private final MasterServices services;

public MasterEnvironment(final MasterCoprocessor impl, final int priority, final int seq,
final Configuration conf, final MasterServices services) {
super(impl, priority, seq, conf);
this.connection = services.getConnection();
this.serverName = services.getServerName();
this.services = services;
supportGroupCPs = !useLegacyMethod(impl.getClass(),
"preBalanceRSGroup", ObserverContext.class, String.class);
this.metricRegistry =

@@ -96,12 +94,17 @@ public class MasterCoprocessorHost

@Override
public ServerName getServerName() {
return this.serverName;
return this.services.getServerName();
}

@Override
public Connection getConnection() {
return this.connection;
return this.services.getConnection();
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return this.services.createConnection(conf);
}

@Override

@@ -3715,4 +3715,11 @@ public class HRegionServer extends HasThread implements
public NettyEventLoopGroupConfig getEventLoopGroupConfig() {
return eventLoopGroupConfig;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
User user = UserProvider.instantiate(conf).getCurrent();
return ConnectionUtils.createShortCircuitConnection(conf, null, user, this.serverName,
this.rpcServices, this.rpcServices);
}
}
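
Reading the HRegionServer hunk above: the null argument appears to be the connection's optional thread pool, and rpcServices is passed twice presumably because RSRpcServices serves as both the admin-side and client-side blocking interface that the short-circuit connection wires up; this is inferred from the call site, not stated in the commit.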
@@ -114,9 +114,7 @@ public class RegionCoprocessorHost
private Region region;
ConcurrentMap<String, Object> sharedData;
private final MetricRegistry metricRegistry;
private final Connection connection;
private final ServerName serverName;
private final OnlineRegions onlineRegions;
private final RegionServerServices services;

/**
* Constructor

@@ -128,11 +126,8 @@ public class RegionCoprocessorHost
final RegionServerServices services, final ConcurrentMap<String, Object> sharedData) {
super(impl, priority, seq, conf);
this.region = region;
// Mocks may have services as null at test time.
this.connection = services != null? services.getConnection(): null;
this.serverName = services != null? services.getServerName(): null;
this.sharedData = sharedData;
this.onlineRegions = services;
this.services = services;
this.metricRegistry =
MetricsCoprocessor.createRegistryForRegionCoprocessor(impl.getClass().getName());
}

@@ -144,17 +139,23 @@ public class RegionCoprocessorHost
}

public OnlineRegions getOnlineRegions() {
return this.onlineRegions;
return this.services;
}

@Override
public Connection getConnection() {
return this.connection;
// Mocks may have services as null at test time.
return services != null ? services.getConnection() : null;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return services != null ? this.services.createConnection(conf) : null;
}

@Override
public ServerName getServerName() {
return this.serverName;
return services != null? services.getServerName(): null;
}

@Override

@@ -210,9 +210,7 @@ public class RegionServerCoprocessorHost extends
private static class RegionServerEnvironment extends BaseEnvironment<RegionServerCoprocessor>
implements RegionServerCoprocessorEnvironment {
private final MetricRegistry metricRegistry;
private final Connection connection;
private final ServerName serverName;
private final OnlineRegions onlineRegions;
private final RegionServerServices services;

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BC_UNCONFIRMED_CAST",
justification="Intentional; FB has trouble detecting isAssignableFrom")

@@ -223,26 +221,29 @@ public class RegionServerCoprocessorHost extends
for (Service service : impl.getServices()) {
services.registerService(service);
}
this.onlineRegions = services;
this.connection = services.getConnection();
this.serverName = services.getServerName();
this.services = services;
this.metricRegistry =
MetricsCoprocessor.createRegistryForRSCoprocessor(impl.getClass().getName());
}

@Override
public OnlineRegions getOnlineRegions() {
return this.onlineRegions;
return this.services;
}

@Override
public ServerName getServerName() {
return this.serverName;
return this.services.getServerName();
}

@Override
public Connection getConnection() {
return this.connection;
return this.services.getConnection();
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return this.services.createConnection(conf);
}

@Override
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.replication.regionserver;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

@@ -31,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

@@ -203,5 +206,10 @@ public class ReplicationSyncUp extends Configured implements Tool {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.executor.ExecutorService;

@@ -329,4 +330,9 @@ public class MockRegionServerServices implements RegionServerServices {
public RegionServerSpaceQuotaManager getRegionServerSpaceQuotaManager() {
return null;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;

@@ -460,4 +461,9 @@ public class MockNoopMasterServices implements MasterServices, Server {
public FileSystem getFileSystem() {
return null;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;

@@ -672,4 +673,9 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
throws ServiceException {
return null;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;

@@ -343,5 +344,10 @@ public class TestActiveMasterManager {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.EnvironmentEdge;

@@ -272,6 +273,11 @@ public class TestHFileCleaner {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

@Test

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;

@@ -211,5 +212,10 @@ public class TestHFileLinkCleaner {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationQueues;
import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;

@@ -319,6 +320,11 @@ public class TestLogsCleaner {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

static class FaultyZooKeeperWatcher extends ZKWatcher {

@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

@@ -330,6 +331,11 @@ public class TestReplicationHFileCleaner {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

static class FaultyZooKeeperWatcher extends ZKWatcher {
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Iterator;

@@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

@@ -857,6 +859,11 @@ public class TestHeapMemoryManager {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

static class CustomHeapMemoryTuner implements HeapMemoryTuner {

@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.LongAdder;

@@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;

@@ -157,6 +159,11 @@ public class TestSplitLogWorker {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems)
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.wal.DamagedWALException;

@@ -569,6 +570,11 @@ public class TestWALLockup {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

static class DummyWALActionsListener extends WALActionsListener.Base {

@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;

@@ -485,5 +486,10 @@ public class TestReplicationStateHBaseImpl {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;

@@ -217,5 +218,10 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

@@ -38,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;

@@ -317,5 +319,10 @@ public class TestReplicationTrackerZKImpl {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}

@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.wal.WALEdit;

@@ -731,5 +732,10 @@ public abstract class TestReplicationSourceManager {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}
}
@@ -323,6 +323,11 @@ public class TestTokenAuthentication {
public Connection getConnection() {
return null;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
});

started = true;

@@ -410,6 +415,11 @@ public class TestTokenAuthentication {
// TODO Auto-generated method stub
return null;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}

@Parameters(name = "{index}: rpcServerImpl={0}")

@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

@@ -144,4 +145,9 @@ public class MockServer implements Server {
public boolean isStopping() {
return false;
}

@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
}
}