HBASE-19007 Align Services Interfaces in Master and RegionServer

Purges Server, MasterServices, and RegionServerServices from
CoprocessorEnvironments. Replaces removed functionality with
a set of carefully curated methods on the *CoprocessorEnvironment
implementations (Varies by CoprocessorEnvironment in that the
MasterCoprocessorEnvironment has Master-type facility exposed,
and so on).

A few core Coprocessors that should long ago have been converted
to be integral, violate their context; e.g. a RegionCoprocessor
wants free access to a hosting RegionServer (which may or may not
be present). Rather than let these violators make us corrupt the
CP API, instead, we've made up a hacky system that allows core
Coprocessors access to internals. A new CoreCoprocessor Annotation
has been introduced. When loading Coprocessors, if the instance is
annotated CoreCoprocessor, we pass it an Environment that has been
padded w/ extra-stuff. On invocation, CoreCoprocessors know how to
route their way to these extras in their environment.

See the *CoprocessorHost for how they do the check for CoreCoprocessor
and pass a fatter *Coprocessor, one that allows getting of either
a RegionServerService or MasterService out of the environment
via Marker Interfaces.

Removed org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices

M hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
 This Endpoint has been deprecated because its functionality has been
 moved to core. Marking it a CoreCoprocessor in the meantime to
 minimize change.

M hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 This should be integral to hbase. Meantime, marking it CoreCoprocessor.

M hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
 Added doc on where it is used and added back a few methods we'd
removed.

A hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoreCoprocessor.java
 New annotation for core hbase coprocessors. They get richer environment
 on coprocessor loading.

A hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
A hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
 Marker Interface to access extras if present.

M hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java
  Purge MasterServices access. Allow CPs a Connection.

M hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java
  Purge RegionServerServices access. Allow CPs a Connection.

M hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java
  Purge MasterServices access. Allow CPs a Connection.

M hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java
M hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java
  We no longer have access to MasterServices. Don't need it actually.
  Use short-circuiting Admin instead.

D hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CoprocessorRegionServerServices.java
  Removed. Not needed now we do CP Env differently.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
  No need to go via RSS to getOnlineTables; just use HRS.

And so on. Adds tests to ensure we can only get at extra info
if the CP has been properly marked.
This commit is contained in:
Guanghao Zhang 2017-10-16 17:12:37 +08:00 committed by Michael Stack
parent 51ceeece25
commit 00f2b18148
61 changed files with 1182 additions and 310 deletions

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -123,6 +123,46 @@ public final class ConnectionUtils {
log.info(sn + " server-side Connection retries=" + retries);
}
/**
 * A ClusterConnection that bypasses the RPC layer, invoking the local Admin/Client
 * stubs directly whenever the target of a call is 'this' server; saves network hops
 * and protobuf marshalling for local invocations.
 */
@VisibleForTesting // Class is visible so can assert we are short-circuiting when expected.
public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
  // Identity of the hosting server; calls addressed to it are short-circuited.
  private final ServerName serverName;
  // Direct (in-process) service stubs used in place of RPC for the local server.
  private final AdminService.BlockingInterface localHostAdmin;
  private final ClientService.BlockingInterface localHostClient;

  private ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User user,
      ServerName serverName, AdminService.BlockingInterface admin,
      ClientService.BlockingInterface client) throws IOException {
    super(conf, pool, user);
    this.serverName = serverName;
    this.localHostAdmin = admin;
    this.localHostClient = client;
  }

  @Override
  public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
    if (this.serverName.equals(sn)) {
      return this.localHostAdmin;
    }
    return super.getAdmin(sn);
  }

  @Override
  public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
    if (this.serverName.equals(sn)) {
      return this.localHostClient;
    }
    return super.getClient(sn);
  }

  @Override
  public MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException {
    // Only a Master-hosted connection has a client stub that doubles as a Master service.
    if (!(this.localHostClient instanceof MasterService.BlockingInterface)) {
      return super.getKeepAliveMasterService();
    }
    return new ShortCircuitMasterConnection(
        (MasterService.BlockingInterface) this.localHostClient);
  }
}
/**
* Creates a short-circuit connection that can bypass the RPC layer (serialization,
* deserialization, networking, etc..) when talking to a local server.
@ -142,27 +182,7 @@ public final class ConnectionUtils {
if (user == null) {
user = UserProvider.instantiate(conf).getCurrent();
}
return new ConnectionImplementation(conf, pool, user) {
@Override
public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
return serverName.equals(sn) ? admin : super.getAdmin(sn);
}
@Override
public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
return serverName.equals(sn) ? client : super.getClient(sn);
}
@Override
public MasterKeepAliveConnection getKeepAliveMasterService()
throws MasterNotRunningException {
if (!(client instanceof MasterService.BlockingInterface)) {
return super.getKeepAliveMasterService();
} else {
return new ShortCircuitMasterConnection((MasterService.BlockingInterface) client);
}
}
};
return new ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, client);
}
/**

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file

View File

@ -27,6 +27,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@ -54,6 +56,7 @@ import org.apache.yetus.audience.InterfaceAudience;
* Coprocessor service for bulk loads in secure mode.
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
*/
@CoreCoprocessor
@InterfaceAudience.Private
@Deprecated
public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements RegionCoprocessor {
@ -68,8 +71,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
@Override
public void start(CoprocessorEnvironment env) {
this.env = (RegionCoprocessorEnvironment)env;
assert this.env.getCoprocessorRegionServerServices() instanceof RegionServerServices;
rsServices = (RegionServerServices) this.env.getCoprocessorRegionServerServices();
rsServices = ((HasRegionServerServices)this.env).getRegionServerServices();
LOG.warn("SecureBulkLoadEndpoint is deprecated. It will be removed in future releases.");
LOG.warn("Secure bulk load has been integrated into HBase core.");
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
@ -73,6 +75,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import org.apache.yetus.audience.InterfaceAudience;
// TODO: Encapsulate MasterObserver functions into separate subclass.
@CoreCoprocessor
@InterfaceAudience.Private
public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
private static final Log LOG = LogFactory.getLog(RSGroupAdminEndpoint.class);
@ -86,7 +89,10 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
@Override
public void start(CoprocessorEnvironment env) throws IOException {
master = ((MasterCoprocessorEnvironment)env).getMasterServices();
if (!(env instanceof HasMasterServices)) {
throw new IOException("Does not implement HMasterServices");
}
master = ((HasMasterServices)env).getMasterServices();
groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
Class<?> clazz =

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -19,6 +19,7 @@
package org.apache.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@ -26,8 +27,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Defines the set of shared functions implemented by HBase servers (Masters
* and RegionServers).
* Defines a curated set of shared functions implemented by HBase servers (Masters
* and RegionServers). For use internally only. Be judicious adding API. Changes cause ripples
* through the code base.
*/
@InterfaceAudience.Private
public interface Server extends Abortable, Stoppable {
@ -79,4 +81,17 @@ public interface Server extends Abortable, Stoppable {
* @return The {@link ChoreService} instance for this server
*/
ChoreService getChoreService();
/**
* @return Return the FileSystem object used.
*/
// TODO: On Master, return Master's. On RegionServer, return RegionServers. The FileSystems
// may differ. TODO.
FileSystem getFileSystem();
/**
* @return True is the server is Stopping
*/
// Note: This method is not part of the Stoppable Interface.
boolean isStopping();
}

View File

@ -51,8 +51,7 @@ public class BaseEnvironment<C extends Coprocessor> implements CoprocessorEnviro
* @param impl the coprocessor instance
* @param priority chaining priority
*/
public BaseEnvironment(final C impl, final int priority,
final int seq, final Configuration conf) {
public BaseEnvironment(final C impl, final int priority, final int seq, final Configuration conf) {
this.impl = impl;
this.classLoader = impl.getClass().getClassLoader();
this.priority = priority;

View File

@ -0,0 +1,45 @@
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.yetus.audience.InterfaceAudience;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marker annotation that denotes Coprocessors that are core to HBase.
 * A Core Coprocessor is a CP that realizes a core HBase feature. Features are sometimes
 * implemented first as a Coprocessor to prove viability. The idea is that once proven, they then
 * migrate to core. Meantime, HBase Core Coprocessors get this annotation. No other Coprocessors
 * can carry this annotation.
 */
// Core Coprocessors are generally naughty, making use of HBase internals and doing accesses no
// Coprocessor should be up to, so we mark these special Coprocessors with this annotation and on
// Coprocessor load, we'll give these Coprocessors a 'richer' Environment with access to internals
// not allowed to other Coprocessors. See the *CoprocessorHost classes where they do the check for
// this annotation during Coprocessor loading.
@Target(ElementType.TYPE)
@Inherited
@InterfaceAudience.Private
@Retention(RetentionPolicy.RUNTIME)
// This Annotation is deliberately not @Documented so its mechanics stay out of the public javadoc.
public @interface CoreCoprocessor {}

View File

@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.yetus.audience.InterfaceAudience;
/**
 * Marks a class as having a MasterServices accessor.
 * Temporary hack until core Coprocessors are integrated.
 * @see CoreCoprocessor
 * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this
 * facility as CoreCoprocessors are integrated into core.
 */
@Deprecated
@InterfaceAudience.Private
public interface HasMasterServices {
  /**
   * @return An instance of MasterServices, an object NOT for Coprocessor consumption.
   */
  MasterServices getMasterServices();
}

View File

@ -0,0 +1,37 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.yetus.audience.InterfaceAudience;
/**
 * Marks a class as having a RegionServerServices accessor.
 * Temporary hack until core Coprocessors are integrated.
 * @see CoreCoprocessor
 * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 we will not need this
 * facility as CoreCoprocessors are integrated into core.
 */
@Deprecated
@InterfaceAudience.Private
public interface HasRegionServerServices {
  /**
   * @return An instance of RegionServerServices, an object NOT for Coprocessor consumption.
   */
  RegionServerServices getRegionServerServices();
}

View File

@ -19,18 +19,36 @@
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment<MasterCoprocessor> {
/** @return reference to the HMaster services */
MasterServices getMasterServices();
/**
* @return Hosting Server's ServerName
*/
ServerName getServerName();
/**
* Be careful RPC'ing from a Coprocessor context.
* RPC's will fail, stall, retry, and/or crawl because the remote side is not online, is
* struggling or it is on the other side of a network partition. Any use of Connection from
* inside a Coprocessor must be able to handle all such hiccups.
*
* <p>Using a Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC (and
* protobuf marshalling/unmarshalling).
*
* @return The host's Connection to the Cluster.
*/
Connection getConnection();
/**
* Returns a MetricRegistry that can be used to track metrics at the master level.

View File

@ -23,9 +23,10 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@ -39,12 +40,29 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment<Reg
/** @return region information for the region this coprocessor is running on */
RegionInfo getRegionInfo();
/** @return reference to the region server services */
CoprocessorRegionServerServices getCoprocessorRegionServerServices();
/** @return shared data between all instances of this coprocessor */
ConcurrentMap<String, Object> getSharedData();
/**
* @return Hosting Server's ServerName
*/
ServerName getServerName();
/**
* Be careful RPC'ing from a Coprocessor context.
* RPC's will fail, stall, retry, and/or crawl because the remote side is not online, is
* struggling or it is on the other side of a network partition. Any use of Connection from
* inside a Coprocessor must be able to handle all such hiccups.
*
* <p>Using a Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC (and
* protobuf marshalling/unmarshalling).
*
* @return The host's Connection to the Cluster.
*/
Connection getConnection();
/**
* Returns a MetricRegistry that can be used to track metrics at the region server level. All
* metrics tracked at this level will be shared by all the coprocessor instances

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -20,8 +20,9 @@ package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@ -30,11 +31,24 @@ import org.apache.yetus.audience.InterfaceStability;
public interface RegionServerCoprocessorEnvironment
extends CoprocessorEnvironment<RegionServerCoprocessor> {
/**
* Gets the region server services.
*
* @return the region server services
* @return Hosting Server's ServerName
*/
CoprocessorRegionServerServices getCoprocessorRegionServerServices();
ServerName getServerName();
/**
* Be careful RPC'ing from a Coprocessor context.
* RPC's will fail, stall, retry, and/or crawl because the remote side is not online, is
* struggling or it is on the other side of a network partition. Any use of Connection from
* inside a Coprocessor must be able to handle all such hiccups.
*
* <p>Using a Connection to get at a local resource -- say a Region that is on the local
* Server or using Admin Interface from a Coprocessor hosted on the Master -- will result in a
* short-circuit of the RPC framework to make a direct invocation avoiding RPC (and
* protobuf marshalling/unmarshalling).
*
* @return The host's Connection to the Cluster.
*/
Connection getConnection();
/**
* Returns a MetricRegistry that can be used to track metrics at the region server level.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -24,7 +24,6 @@ import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.util.ArrayList;
@ -584,9 +583,8 @@ public abstract class RpcServer implements RpcServerInterface,
}
/**
* Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}
* and {@link #channelWrite(GatheringByteChannel, BufferChain)}. Only
* one of readCh or writeCh should be non-null.
* Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer).
* Only one of readCh or writeCh should be non-null.
*
* @param readCh read channel
* @param writeCh write channel
@ -594,7 +592,6 @@ public abstract class RpcServer implements RpcServerInterface,
* @return bytes written
* @throws java.io.IOException e
* @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)
* @see #channelWrite(GatheringByteChannel, BufferChain)
*/
private static int channelIO(ReadableByteChannel readCh,
WritableByteChannel writeCh,

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.RegionInfo;
@ -41,6 +42,8 @@ import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
@ -76,14 +79,16 @@ public class MasterCoprocessorHost
*/
private static class MasterEnvironment extends BaseEnvironment<MasterCoprocessor>
implements MasterCoprocessorEnvironment {
private final MasterServices masterServices;
private final Connection connection;
private final ServerName serverName;
private final boolean supportGroupCPs;
private final MetricRegistry metricRegistry;
public MasterEnvironment(final MasterCoprocessor impl, final int priority, final int seq,
final Configuration conf, final MasterServices services) {
super(impl, priority, seq, conf);
this.masterServices = services;
this.connection = services.getConnection();
this.serverName = services.getServerName();
supportGroupCPs = !useLegacyMethod(impl.getClass(),
"preBalanceRSGroup", ObserverContext.class, String.class);
this.metricRegistry =
@ -91,8 +96,13 @@ public class MasterCoprocessorHost
}
@Override
public MasterServices getMasterServices() {
return masterServices;
public ServerName getServerName() {
return this.serverName;
}
@Override
public Connection getConnection() {
return this.connection;
}
@Override
@ -107,6 +117,29 @@ public class MasterCoprocessorHost
}
}
/**
* Special version of MasterEnvironment that exposes MasterServices for Core Coprocessors only.
* Temporary hack until Core Coprocessors are integrated into Core.
*/
private static class MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
implements HasMasterServices {
private final MasterServices masterServices;
public MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int priority,
final int seq, final Configuration conf, final MasterServices services) {
super(impl, priority, seq, conf, services);
this.masterServices = services;
}
/**
* @return An instance of MasterServices, an object NOT for general user-space Coprocessor
* consumption.
*/
public MasterServices getMasterServices() {
return this.masterServices;
}
}
private MasterServices masterServices;
public MasterCoprocessorHost(final MasterServices services, final Configuration conf) {
@ -122,8 +155,6 @@ public class MasterCoprocessorHost
loadSystemCoprocessors(conf, MASTER_COPROCESSOR_CONF_KEY);
}
@Override
public MasterEnvironment createEnvironment(final MasterCoprocessor instance, final int priority,
final int seq, final Configuration conf) {
@ -131,7 +162,10 @@ public class MasterCoprocessorHost
for (Service service : instance.getServices()) {
masterServices.registerService(service);
}
return new MasterEnvironment(instance, priority, seq, conf, masterServices);
// If a CoreCoprocessor, return a 'richer' environment, one laden with MasterServices.
return instance.getClass().isAnnotationPresent(CoreCoprocessor.class)?
new MasterEnvironmentForCoreCoprocessors(instance, priority, seq, conf, masterServices):
new MasterEnvironment(instance, priority, seq, conf, masterServices);
}
@Override

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -55,7 +55,10 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe
import com.google.protobuf.Service;
/**
* Services Master supplies
* A curated subset of services provided by {@link HMaster}.
* For use internally only. Passed to Managers, Services and Chores so can pass less-than-a
* full-on HMaster at test-time. Be judicious adding API. Changes cause ripples through
* the code base.
*/
@InterfaceAudience.Private
public interface MasterServices extends Server {
@ -414,11 +417,6 @@ public interface MasterServices extends Server {
*/
public LoadBalancer getLoadBalancer();
/**
* @return True if this master is stopping.
*/
boolean isStopping();
boolean isSplitOrMergeEnabled(MasterSwitchType switchType);
/**

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
/**
@ -64,9 +63,8 @@ public class MasterSpaceQuotaObserver implements MasterCoprocessor, MasterObserv
if (!quotasEnabled) {
return;
}
final MasterServices master = ctx.getEnvironment().getMasterServices();
final Connection conn = master.getConnection();
Quotas quotas = QuotaUtil.getTableQuota(master.getConnection(), tableName);
final Connection conn = ctx.getEnvironment().getConnection();
Quotas quotas = QuotaUtil.getTableQuota(conn, tableName);
if (quotas != null && quotas.hasSpace()) {
QuotaSettings settings = QuotaSettingsFactory.removeTableSpaceLimit(tableName);
try (Admin admin = conn.getAdmin()) {
@ -82,9 +80,8 @@ public class MasterSpaceQuotaObserver implements MasterCoprocessor, MasterObserv
if (!quotasEnabled) {
return;
}
final MasterServices master = ctx.getEnvironment().getMasterServices();
final Connection conn = master.getConnection();
Quotas quotas = QuotaUtil.getNamespaceQuota(master.getConnection(), namespace);
final Connection conn = ctx.getEnvironment().getConnection();
Quotas quotas = QuotaUtil.getNamespaceQuota(conn, namespace);
if (quotas != null && quotas.hasSpace()) {
QuotaSettings settings = QuotaSettingsFactory.removeNamespaceSpaceLimit(namespace);
try (Admin admin = conn.getAdmin()) {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.quotas;
import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
@ -188,7 +189,7 @@ public class QuotaCache implements Stoppable {
justification="I do not understand why the complaints, it looks good to me -- FIX")
protected void chore() {
// Prefetch online tables/namespaces
for (TableName table: QuotaCache.this.rsServices.getOnlineTables()) {
for (TableName table: ((HRegionServer)QuotaCache.this.rsServices).getOnlineTables()) {
if (table.isSystemTable()) continue;
if (!QuotaCache.this.tableQuotaCache.containsKey(table)) {
QuotaCache.this.tableQuotaCache.putIfAbsent(table, new QuotaState());

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.util.Set;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
/**
 * Services exposed to Coprocessors by {@link HRegionServer}.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface CoprocessorRegionServerServices extends ImmutableOnlineRegions {
  /**
   * @return The unique server name for this server.
   */
  ServerName getServerName();

  /**
   * Returns a reference to the servers' connection.
   *
   * Important note: this method returns a reference to Connection which is managed
   * by Server itself, so callers must NOT attempt to close connection obtained.
   */
  Connection getConnection();

  /**
   * @return all the online tables in this RS
   */
  Set<TableName> getOnlineTables();

  /**
   * @return Return the FileSystem object used by the regionserver
   */
  FileSystem getFileSystem();

  /**
   * @return True if this regionserver is stopping.
   */
  boolean isStopping();
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -3008,12 +3008,12 @@ public class HRegionServer extends HasThread implements
}
return allRegions;
}
/**
* Gets the online tables in this RS.
* This method looks at the in-memory onlineRegions.
* @return all the online tables in this RS
*/
@Override
public Set<TableName> getOnlineTables() {
Set<TableName> tables = new HashSet<>();
synchronized (this.onlineRegions) {

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -43,7 +43,9 @@ import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@ -59,7 +61,9 @@ import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
@ -107,13 +111,13 @@ public class RegionCoprocessorHost
*
* Encapsulation of the environment of each coprocessor
*/
static class RegionEnvironment extends BaseEnvironment<RegionCoprocessor>
private static class RegionEnvironment extends BaseEnvironment<RegionCoprocessor>
implements RegionCoprocessorEnvironment {
private Region region;
private RegionServerServices rsServices;
ConcurrentMap<String, Object> sharedData;
private final MetricRegistry metricRegistry;
private final Connection connection;
private final ServerName serverName;
/**
* Constructor
@ -125,7 +129,9 @@ public class RegionCoprocessorHost
final RegionServerServices services, final ConcurrentMap<String, Object> sharedData) {
super(impl, priority, seq, conf);
this.region = region;
this.rsServices = services;
// Mocks may have services as null at test time.
this.connection = services != null? services.getConnection(): null;
this.serverName = services != null? services.getServerName(): null;
this.sharedData = sharedData;
this.metricRegistry =
MetricsCoprocessor.createRegistryForRegionCoprocessor(impl.getClass().getName());
@ -137,10 +143,14 @@ public class RegionCoprocessorHost
return region;
}
/** @return reference to the region server services */
@Override
public CoprocessorRegionServerServices getCoprocessorRegionServerServices() {
return rsServices;
public Connection getConnection() {
return this.connection;
}
@Override
public ServerName getServerName() {
return this.serverName;
}
@Override
@ -165,6 +175,30 @@ public class RegionCoprocessorHost
}
}
/**
* Special version of RegionEnvironment that exposes RegionServerServices for Core
* Coprocessors only. Temporary hack until Core Coprocessors are integrated into Core.
*/
private static class RegionEnvironmentForCoreCoprocessors extends
RegionEnvironment implements HasRegionServerServices {
private final RegionServerServices rsServices;
public RegionEnvironmentForCoreCoprocessors(final RegionCoprocessor impl, final int priority,
final int seq, final Configuration conf, final Region region,
final RegionServerServices services, final ConcurrentMap<String, Object> sharedData) {
super(impl, priority, seq, conf, region, services, sharedData);
this.rsServices = services;
}
/**
* @return An instance of RegionServerServices, an object NOT for general user-space Coprocessor
* consumption.
*/
public RegionServerServices getRegionServerServices() {
return this.rsServices;
}
}
static class TableCoprocessorAttribute {
private Path path;
private String className;
@ -405,8 +439,11 @@ public class RegionCoprocessorHost
SHARED_DATA_MAP.computeIfAbsent(instance.getClass().getName(),
k -> new ConcurrentHashMap<>());
}
return new RegionEnvironment(instance, priority, seq, conf, region,
rsServices, classData);
// If a CoreCoprocessor, return a 'richer' environment, one laden with RegionServerServices.
return instance.getClass().isAnnotationPresent(CoreCoprocessor.class)?
new RegionEnvironmentForCoreCoprocessors(instance, priority, seq, conf, region,
rsServices, classData):
new RegionEnvironment(instance, priority, seq, conf, region, rsServices, classData);
}
@Override

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -24,10 +24,13 @@ import com.google.protobuf.Service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
@ -37,7 +40,6 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
import org.apache.hadoop.hbase.security.User;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@InterfaceAudience.Private
public class RegionServerCoprocessorHost extends
@ -68,7 +70,11 @@ public class RegionServerCoprocessorHost extends
@Override
public RegionServerEnvironment createEnvironment(
RegionServerCoprocessor instance, int priority, int sequence, Configuration conf) {
return new RegionServerEnvironment(instance, priority, sequence, conf, this.rsServices);
// If a CoreCoprocessor, return a 'richer' environment, one laden with RegionServerServices.
return instance.getClass().isAnnotationPresent(CoreCoprocessor.class)?
new RegionServerEnvironmentForCoreCoprocessors(instance, priority, sequence, conf,
this.rsServices):
new RegionServerEnvironment(instance, priority, sequence, conf, this.rsServices);
}
@Override
@ -197,26 +203,33 @@ public class RegionServerCoprocessorHost extends
*/
private static class RegionServerEnvironment extends BaseEnvironment<RegionServerCoprocessor>
implements RegionServerCoprocessorEnvironment {
private final RegionServerServices regionServerServices;
private final MetricRegistry metricRegistry;
private final Connection connection;
private final ServerName serverName;
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BC_UNCONFIRMED_CAST",
justification="Intentional; FB has trouble detecting isAssignableFrom")
public RegionServerEnvironment(final RegionServerCoprocessor impl, final int priority,
final int seq, final Configuration conf, final RegionServerServices services) {
super(impl, priority, seq, conf);
this.regionServerServices = services;
// If coprocessor exposes any services, register them.
for (Service service : impl.getServices()) {
regionServerServices.registerService(service);
services.registerService(service);
}
this.connection = services.getConnection();
this.serverName = services.getServerName();
this.metricRegistry =
MetricsCoprocessor.createRegistryForRSCoprocessor(impl.getClass().getName());
}
@Override
public CoprocessorRegionServerServices getCoprocessorRegionServerServices() {
return regionServerServices;
public ServerName getServerName() {
return this.serverName;
}
@Override
public Connection getConnection() {
return this.connection;
}
@Override
@ -230,4 +243,28 @@ public class RegionServerCoprocessorHost extends
MetricsCoprocessor.removeRegistry(metricRegistry);
}
}
/**
* Special version of RegionServerEnvironment that exposes RegionServerServices for Core
* Coprocessors only. Temporary hack until Core Coprocessors are integrated into Core.
*/
private static class RegionServerEnvironmentForCoreCoprocessors extends RegionServerEnvironment
implements HasRegionServerServices {
final RegionServerServices regionServerServices;
public RegionServerEnvironmentForCoreCoprocessors(final RegionServerCoprocessor impl,
final int priority, final int seq, final Configuration conf,
final RegionServerServices services) {
super(impl, priority, seq, conf, services);
this.regionServerServices = services;
}
/**
* @return An instance of RegionServerServices, an object NOT for general user-space Coprocessor
* consumption.
*/
public RegionServerServices getRegionServerServices() {
return this.regionServerServices;
}
}
}

View File

@ -41,11 +41,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
import com.google.protobuf.Service;
/**
* Services provided by {@link HRegionServer}
* A curated subset of services provided by {@link HRegionServer}.
* For use internally only. Passed to Managers, Services and Chores so that we can pass less
* than a full-on HRegionServer at test time. Be judicious adding API. Changes cause ripples
* through the code base.
*/
@InterfaceAudience.Private
public interface RegionServerServices
extends Server, OnlineRegions, FavoredNodesForRegion, CoprocessorRegionServerServices {
public interface RegionServerServices extends Server, OnlineRegions, FavoredNodesForRegion {
/** @return the WAL for a particular region. Pass null for getting the
* default (common) WAL */

View File

@ -27,7 +27,10 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
@ -37,11 +40,13 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Pair;
/**
* An Observer to facilitate replication operations
*/
import javax.validation.constraints.Null;
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
/**
* An Observer to add HFile References to replication queue.
*/
@CoreCoprocessor
@InterfaceAudience.Private
public class ReplicationObserver implements RegionCoprocessor, RegionObserver {
private static final Log LOG = LogFactory.getLog(ReplicationObserver.class);
@ -51,19 +56,24 @@ public class ReplicationObserver implements RegionCoprocessor, RegionObserver {
}
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
justification="NPE should never happen; if it does it is a bigger issue")
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
RegionCoprocessorEnvironment env = ctx.getEnvironment();
Configuration c = env.getConfiguration();
if (pairs == null || pairs.isEmpty()
|| !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
if (pairs == null || pairs.isEmpty() ||
!c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded "
+ "data replication.");
return;
}
HRegionServer rs = (HRegionServer) env.getCoprocessorRegionServerServices();
Replication rep = (Replication) rs.getReplicationSourceService();
// This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is
// just going to break. This is all private. Not allowed. Regions shouldn't assume they are
// hosted in a RegionServer. TODO: fix.
RegionServerServices rss = ((HasRegionServerServices)env).getRegionServerServices();
Replication rep = (Replication)((HRegionServer)rss).getReplicationSourceService();
rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs);
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -193,5 +193,15 @@ public class ReplicationSyncUp extends Configured implements Tool {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.security.access;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
@ -43,9 +44,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
@ -63,13 +61,10 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.security.User;
@ -125,27 +120,6 @@ public class AccessControlLists {
private static final Log LOG = LogFactory.getLog(AccessControlLists.class);
/**
* Create the ACL table
* @param master
* @throws IOException
*/
static void createACLTable(MasterServices master) throws IOException {
/** Table descriptor for ACL table */
final HTableDescriptor ACL_TABLEDESC = new HTableDescriptor(ACL_TABLE_NAME)
.addFamily(new HColumnDescriptor(ACL_LIST_FAMILY)
.setMaxVersions(1)
.setInMemory(true)
.setBlockCacheEnabled(true)
.setBlocksize(8 * 1024)
.setBloomFilterType(BloomType.NONE)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will
// be the case if we are using CombinedBlockCache (Bucket Cache).
.setCacheDataInL1(true));
master.createSystemTable(ACL_TABLEDESC);
}
/**
* Stores a new user permission grant in the access control lists table.
* @param conf the configuration

View File

@ -54,13 +54,14 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@ -75,9 +76,13 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
@ -94,7 +99,6 @@ import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.net.Address;
@ -105,11 +109,11 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;
@ -173,6 +177,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
* commands.
* </p>
*/
@CoreCoprocessor
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class AccessController implements MasterCoprocessor, RegionCoprocessor,
RegionServerCoprocessor, AccessControlService.Interface,
@ -285,8 +290,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
// to and fro conversion overhead. get req is converted to PB req
// and results are converted to PB results 1st and then to POJOs
// again. We could have avoided such at least in ACL table context..
try (Table t = e.getCoprocessorRegionServerServices().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
try (Table t = e.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME)) {
for (byte[] entry : entries) {
currentEntry = entry;
ListMultimap<String, TablePermission> perms =
@ -955,20 +959,24 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
ZooKeeperWatcher zk = null;
if (env instanceof MasterCoprocessorEnvironment) {
// if running on HMaster
MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
zk = mEnv.getMasterServices().getZooKeeper();
MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment)env;
if (mEnv instanceof HasMasterServices) {
zk = ((HasMasterServices)mEnv).getMasterServices().getZooKeeper();
}
} else if (env instanceof RegionServerCoprocessorEnvironment) {
RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
assert rsEnv.getCoprocessorRegionServerServices() instanceof RegionServerServices;
zk = ((RegionServerServices) rsEnv.getCoprocessorRegionServerServices()).getZooKeeper();
RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment)env;
if (rsEnv instanceof HasRegionServerServices) {
zk = ((HasRegionServerServices)rsEnv).getRegionServerServices().getZooKeeper();
}
} else if (env instanceof RegionCoprocessorEnvironment) {
// if running at region
regionEnv = (RegionCoprocessorEnvironment) env;
conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
assert regionEnv.getCoprocessorRegionServerServices() instanceof RegionServerServices;
zk = ((RegionServerServices) regionEnv.getCoprocessorRegionServerServices()).getZooKeeper();
compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
if (regionEnv instanceof HasRegionServerServices) {
zk = ((HasRegionServerServices)regionEnv).getRegionServerServices().getZooKeeper();
}
}
// set the user-provider.
@ -1080,7 +1088,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Table table = c.getEnvironment().getMasterServices().getConnection().
try (Table table = c.getEnvironment().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(),
userperm, table);
@ -1106,7 +1114,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Table table = c.getEnvironment().getMasterServices().getConnection().
try (Table table = c.getEnvironment().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.removeTablePermissions(conf, tableName, table);
}
@ -1145,7 +1153,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
List<UserPermission> perms = tableAcls.get(tableName);
if (perms != null) {
for (UserPermission perm : perms) {
try (Table table = ctx.getEnvironment().getMasterServices().getConnection().
try (Table table = ctx.getEnvironment().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.addUserPermission(conf, perm, table);
}
@ -1176,7 +1184,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
public Void run() throws Exception {
UserPermission userperm = new UserPermission(Bytes.toBytes(owner),
htd.getTableName(), null, Action.values());
try (Table table = c.getEnvironment().getMasterServices().getConnection().
try (Table table = c.getEnvironment().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.addUserPermission(conf, userperm, table);
}
@ -1215,7 +1223,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Table table = ctx.getEnvironment().getMasterServices().getConnection().
try (Table table = ctx.getEnvironment().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.removeTablePermissions(conf, tableName, columnFamily, table);
}
@ -1370,14 +1378,36 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices()
.getConnection(), AccessControlLists.ACL_TABLE_NAME)) {
// initialize the ACL storage table
AccessControlLists.createACLTable(ctx.getEnvironment().getMasterServices());
} else {
aclTabAvailable = true;
try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
if (!admin.tableExists(AccessControlLists.ACL_TABLE_NAME)) {
createACLTable(admin);
} else {
this.aclTabAvailable = true;
}
}
}
/**
 * Create the ACL table.
 * @param admin Admin instance used to create the table; the caller owns and closes it.
 * @throws IOException if the table cannot be created
 */
private static void createACLTable(Admin admin) throws IOException {
  // Column family for ACL entries: one version, in-memory, block cache on, small
  // block size, no bloom filter, and local-only replication scope.
  ColumnFamilyDescriptorBuilder familyBuilder =
      ColumnFamilyDescriptorBuilder.newBuilder(AccessControlLists.ACL_LIST_FAMILY);
  familyBuilder.setMaxVersions(1);
  familyBuilder.setInMemory(true);
  familyBuilder.setBlockCacheEnabled(true);
  familyBuilder.setBlocksize(8 * 1024);
  familyBuilder.setBloomFilterType(BloomType.NONE);
  familyBuilder.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
  // Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will
  // be the case if we are using CombinedBlockCache (Bucket Cache).
  familyBuilder.setCacheDataInL1(true);
  TableDescriptor aclTableDescriptor =
      TableDescriptorBuilder.newBuilder(AccessControlLists.ACL_TABLE_NAME).
          addColumnFamily(familyBuilder.build()).
          build();
  admin.createTable(aclTableDescriptor);
}
@Override
public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
@ -1463,7 +1493,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try (Table table = ctx.getEnvironment().getMasterServices().getConnection().
try (Table table = ctx.getEnvironment().getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.removeNamespacePermissions(conf, namespace, table);
}
@ -2309,7 +2339,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
@Override
public Void run() throws Exception {
// regionEnv is set at #start. Hopefully not null at this point.
try (Table table = regionEnv.getCoprocessorRegionServerServices().getConnection().
try (Table table = regionEnv.getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.addUserPermission(regionEnv.getConfiguration(), perm, table,
request.getMergeExistingPermissions());
@ -2366,7 +2396,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
@Override
public Void run() throws Exception {
// regionEnv is set at #start. Hopefully not null here.
try (Table table = regionEnv.getCoprocessorRegionServerServices().getConnection().
try (Table table = regionEnv.getConnection().
getTable(AccessControlLists.ACL_TABLE_NAME)) {
AccessControlLists.removeUserPermission(regionEnv.getConfiguration(), perm, table);
}
@ -2593,13 +2623,16 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) {
// Otherwise, if the requestor has ADMIN or CREATE privs for all listed tables, the
// request can be granted.
MasterServices masterServices = ctx.getEnvironment().getMasterServices();
for (TableName tableName: tableNamesList) {
// Skip checks for a table that does not exist
if (!masterServices.getTableStateManager().isTablePresent(tableName))
continue;
requirePermission(getActiveUser(ctx), "getTableDescriptors", tableName, null, null,
TableName [] sns = null;
try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
sns = admin.listTableNames();
if (sns == null) return;
for (TableName tableName: tableNamesList) {
// Skip checks for a table that does not exist
if (!admin.tableExists(tableName)) continue;
requirePermission(getActiveUser(ctx), "getTableDescriptors", tableName, null, null,
Action.ADMIN, Action.CREATE);
}
}
}
}

View File

@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@ -147,8 +146,7 @@ public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, Ma
private void verifyCoprocessors(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor htd) throws IOException {
MasterServices services = ctx.getEnvironment().getMasterServices();
Configuration conf = services.getConfiguration();
Configuration conf = ctx.getEnvironment().getConfiguration();
Collection<String> paths =
conf.getStringCollection(

View File

@ -27,6 +27,8 @@ import java.util.Collections;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@ -46,6 +48,7 @@ import org.apache.yetus.audience.InterfaceAudience;
* Provides a service for obtaining authentication tokens via the
* {@link AuthenticationProtos} AuthenticationService coprocessor service.
*/
@CoreCoprocessor
@InterfaceAudience.Private
public class TokenProvider implements AuthenticationProtos.AuthenticationService.Interface,
RegionCoprocessor {
@ -59,11 +62,13 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService
public void start(CoprocessorEnvironment env) {
// if running at region
if (env instanceof RegionCoprocessorEnvironment) {
RegionCoprocessorEnvironment regionEnv =
(RegionCoprocessorEnvironment)env;
assert regionEnv.getCoprocessorRegionServerServices() instanceof RegionServerServices;
RpcServerInterface server = ((RegionServerServices) regionEnv
.getCoprocessorRegionServerServices()).getRpcServer();
RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment)env;
/* Getting the RpcServer from a RegionCE is wrong. There cannot be an expectation that Region
is hosted inside a RegionServer. If you need RpcServer, then pass in a RegionServerCE.
TODO: FIX.
*/
RegionServerServices rss = ((HasRegionServerServices)regionEnv).getRegionServerServices();
RpcServerInterface server = rss.getRpcServer();
SecretManager<?> mgr = ((RpcServer)server).getSecretManager();
if (mgr instanceof AuthenticationTokenSecretManager) {
secretManager = (AuthenticationTokenSecretManager)mgr;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -46,11 +46,11 @@ import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.io.util.StreamUtils;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
@ -112,9 +111,15 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
@Override
public void init(RegionCoprocessorEnvironment e) throws IOException {
assert e.getCoprocessorRegionServerServices() instanceof RegionServerServices;
ZooKeeperWatcher zk = ((RegionServerServices) e.getCoprocessorRegionServerServices())
.getZooKeeper();
/* So, presumption that the RegionCE has a ZK Connection is too much. Why would a RCE have
* a ZK instance? This is cheating presuming we have access to the RS ZKW. TODO: Fix.
*
* And what is going on here? This ain't even a Coprocessor? And its being passed a CP Env?
*/
// This is a CoreCoprocessor. On creation, we should have gotten an environment that
// implements HasRegionServerServices so we can get at RSS. FIX!!!! Integrate this CP as
// native service.
ZooKeeperWatcher zk = ((HasRegionServerServices)e).getRegionServerServices().getZooKeeper();
try {
labelsCache = VisibilityLabelsCache.createAndGet(zk, this.conf);
} catch (IOException ioe) {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Delete;
@ -70,6 +71,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
@ -85,7 +87,6 @@ import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos;
@ -122,6 +123,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.MapMaker;
* Coprocessor that has both the MasterObserver and RegionObserver implemented that supports in
* visibility labels
*/
@CoreCoprocessor
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
// TODO: break out Observer functions into separate class/sub-class.
public class VisibilityController implements MasterCoprocessor, RegionCoprocessor,
@ -212,8 +214,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
// Need to create the new system table for labels here
MasterServices master = ctx.getEnvironment().getMasterServices();
if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getConnection(), LABELS_TABLE_NAME)) {
HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
labelsColumn.setBloomFilterType(BloomType.NONE);
@ -226,7 +227,9 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso
DisabledRegionSplitPolicy.class.getName());
labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
Bytes.toBytes(true));
master.createSystemTable(labelsTable);
try (Admin admin = ctx.getEnvironment().getConnection().getAdmin()) {
admin.createTable(labelsTable);
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -110,11 +110,6 @@ public class MockRegionServerServices implements RegionServerServices {
return null;
}
@Override
public Set<TableName> getOnlineTables() {
return null;
}
@Override
public List<Region> getRegions() {
return null;

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@ -126,6 +128,7 @@ public class TestReplicaWithCluster {
/**
* This copro is used to simulate region server down exception for Get and Scan
*/
@CoreCoprocessor
public static class RegionServerStoppedCopro implements RegionCoprocessor, RegionObserver {
public RegionServerStoppedCopro() {
@ -145,8 +148,7 @@ public class TestReplicaWithCluster {
// Fail for the primary replica and replica 1
if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() <= 1) {
LOG.info("Throw Region Server Stopped Exceptoin for replica id " + replicaId);
throw new RegionServerStoppedException("Server " +
e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
+ " not running");
} else {
LOG.info("We're replica region " + replicaId);
@ -162,8 +164,7 @@ public class TestReplicaWithCluster {
// Fail for the primary replica and replica 1
if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() <= 1) {
LOG.info("Throw Region Server Stopped Exceptoin for replica id " + replicaId);
throw new RegionServerStoppedException("Server " +
e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
+ " not running");
} else {
LOG.info("We're replica region " + replicaId);
@ -197,8 +198,8 @@ public class TestReplicaWithCluster {
if (!e.getEnvironment().getRegion().getRegionInfo().isMetaRegion() && (replicaId == 0)) {
LOG.info("Get, throw Region Server Stopped Exceptoin for region " + e.getEnvironment()
.getRegion().getRegionInfo());
throw new RegionServerStoppedException(
"Server " + e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
throw new RegionServerStoppedException("Server " +
((HasRegionServerServices)e.getEnvironment()).getRegionServerServices().getServerName()
+ " not running");
}
} else {
@ -228,9 +229,9 @@ public class TestReplicaWithCluster {
LOG.info("Scan, throw Region Server Stopped Exceptoin for replica " + e.getEnvironment()
.getRegion().getRegionInfo());
throw new RegionServerStoppedException(
"Server " + e.getEnvironment().getCoprocessorRegionServerServices().getServerName()
+ " not running");
throw new RegionServerStoppedException("Server " +
((HasRegionServerServices)e.getEnvironment()).getRegionServerServices().getServerName()
+ " not running");
} else {
LOG.info("Scan, We're replica region " + replicaId);
}

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;
@ -70,6 +71,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;
@Category({CoprocessorTests.class, SmallTests.class})
public class TestCoprocessorInterface {
@ -387,7 +389,8 @@ public class TestCoprocessorInterface {
// start a region server here, so just manually create cphost
// and set it to region.
Configuration conf = TEST_UTIL.getConfiguration();
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
RegionCoprocessorHost host = new RegionCoprocessorHost(r,
Mockito.mock(RegionServerServices.class), conf);
r.setCoprocessorHost(host);
for (Class<?> implClass : implClasses) {
@ -421,7 +424,8 @@ public class TestCoprocessorInterface {
HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
// this following piece is a hack.
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
RegionCoprocessorHost host =
new RegionCoprocessorHost(r, Mockito.mock(RegionServerServices.class), conf);
r.setCoprocessorHost(host);
for (Class<?> implClass : implClasses) {

View File

@ -0,0 +1,114 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import java.io.IOException;
import static org.junit.Assert.assertTrue;
/**
 * Ensure Coprocessors get ShortCircuit Connections when they get a Connection from their
 * CoprocessorEnvironment; i.e. the Connection handed out by the environment short-circuits
 * RPC to the local server instead of going out over the wire.
 */
@Category({CoprocessorTests.class, SmallTests.class})
public class TestCoprocessorShortCircuitRPC {
  @Rule
  public TestName name = new TestName();
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
      withLookingForStuckThread(true).build();
  private static final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();

  /**
   * Start up a mini cluster with my three CPs loaded, one each of Master, RegionServer and
   * Region type. The CPs themselves do the checking in their start methods as they are loaded.
   */
  @BeforeClass
  public static void beforeClass() throws Exception {
    // Set my test Coprocessors into the Configuration before we start up the cluster.
    Configuration conf = HTU.getConfiguration();
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        TestMasterCoprocessor.class.getName());
    conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY,
        TestRegionServerCoprocessor.class.getName());
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        TestRegionCoprocessor.class.getName());
    HTU.startMiniCluster();
  }

  @AfterClass
  public static void afterClass() throws Exception {
    HTU.shutdownMiniCluster();
  }

  // Three test coprocessors, one of each type that has a Connection in its environment
  // (WALCoprocessor does not).

  public static class TestMasterCoprocessor implements MasterCoprocessor {
    public TestMasterCoprocessor() {}

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
      // start() is declared with the base CoprocessorEnvironment type; cast to the
      // MasterCoprocessorEnvironment we expect here so we can get at its Connection.
      check(((MasterCoprocessorEnvironment)env).getConnection());
    }
  }

  public static class TestRegionServerCoprocessor implements RegionServerCoprocessor {
    public TestRegionServerCoprocessor() {}

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
      // start() is declared with the base CoprocessorEnvironment type; cast to the
      // RegionServerCoprocessorEnvironment we expect here so we can get at its Connection.
      check(((RegionServerCoprocessorEnvironment)env).getConnection());
    }
  }

  public static class TestRegionCoprocessor implements RegionCoprocessor {
    public TestRegionCoprocessor() {}

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
      // start() is declared with the base CoprocessorEnvironment type; cast to the
      // RegionCoprocessorEnvironment we expect here so we can get at its Connection.
      check(((RegionCoprocessorEnvironment)env).getConnection());
    }
  }

  // Assert the Connection the environment handed out is the short-circuiting flavor.
  private static void check(Connection connection) {
    assertTrue(connection instanceof ConnectionUtils.ShortCircuitingClusterConnection);
  }

  @Test
  public void test() throws IOException {
    // Nothing to do in here. The checks are done as part of the cluster spinup when CPs get
    // loaded. Need this here so this class looks like a test.
  }
}

View File

@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.MockMasterServices;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
 * Test that the CoreCoprocessor annotation works, giving access to facilities not usually
 * available to coprocessors: here, a MasterServices instance via the environment a
 * MasterCoprocessorHost hands out when loading an annotated MasterCoprocessor.
 */
@Category({CoprocessorTests.class, SmallTests.class})
public class TestCoreMasterCoprocessor {
  @Rule public TestName name = new TestName();
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
      withLookingForStuckThread(true).build();
  private static final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
  // Mocked-up master facilities and the coprocessor host under test; built fresh per test.
  private MasterServices ms;
  private MasterCoprocessorHost mch;

  @Before
  public void before() throws IOException {
    this.ms = new MockMasterServices(HTU.getConfiguration(), null);
    this.mch = new MasterCoprocessorHost(this.ms, HTU.getConfiguration());
    this.mch.preMasterInitialization();
  }

  @After
  public void after() throws IOException {
    this.mch.preStopMaster();
  }

  /**
   * No annotation with CoreCoprocessor. This should make it so I can NOT get at an instance of
   * MasterServices from the environment, even after some gymnastics.
   */
  public static class NotCoreMasterCoprocessor implements MasterCoprocessor {
    public NotCoreMasterCoprocessor() {}
  }

  /**
   * Annotated with CoreCoprocessor. This should make it so I CAN get at an instance of
   * MasterServices from the environment, after some gymnastics.
   */
  @CoreCoprocessor
  public static class CoreMasterCoprocessor implements MasterCoprocessor {
    public CoreMasterCoprocessor() {}
  }

  /**
   * Assert that when a Coprocessor is annotated with CoreCoprocessor, the environment it is
   * loaded with implements HasMasterServices so a MasterServices instance is accessible;
   * assert an unannotated Coprocessor gets a plain environment.
   * NOTE(review): method name looks copy-pasted from the Region flavor of this test; consider
   * renaming to testCoreMasterCoprocessor.
   * @throws IOException if coprocessor loading fails
   */
  @Test
  public void testCoreRegionCoprocessor() throws IOException {
    MasterCoprocessorEnvironment env =
        this.mch.load(null, NotCoreMasterCoprocessor.class.getName(), 0, HTU.getConfiguration());
    assertFalse(env instanceof HasMasterServices);
    env = this.mch.load(null, CoreMasterCoprocessor.class.getName(), 1, HTU.getConfiguration());
    assertTrue(env instanceof HasMasterServices);
    assertEquals(this.ms, ((HasMasterServices)env).getMasterServices());
  }
}

View File

@ -0,0 +1,113 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MockRegionServerServices;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
 * Test that the CoreCoprocessor annotation works, giving access to facilities not usually
 * available to coprocessors: here, a RegionServerServices instance via the environment a
 * RegionCoprocessorHost hands out when loading an annotated RegionCoprocessor.
 */
@Category({CoprocessorTests.class, SmallTests.class})
public class TestCoreRegionCoprocessor {
  @Rule public TestName name = new TestName();
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
      withLookingForStuckThread(true).build();
  private final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
  // Region opened against mocked-up RegionServer facilities; built fresh per test.
  private HRegion region = null;
  private RegionServerServices rss;

  @Before
  public void before() throws IOException {
    String methodName = this.name.getMethodName();
    TableName tn = TableName.valueOf(methodName);
    // Single family named for the test method; the schema content does not matter here.
    ColumnFamilyDescriptor cfd =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(methodName)).build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).addColumnFamily(cfd).build();
    RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build();
    this.rss = new MockRegionServerServices(HTU.getConfiguration());
    this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null);
  }

  @After
  public void after() throws IOException {
    this.region.close();
  }

  /**
   * No annotation with CoreCoprocessor. This should make it so I can NOT get at an instance of
   * RegionServerServices from the environment, even after some gymnastics.
   */
  public static class NotCoreRegionCoprocessor implements RegionCoprocessor {
    public NotCoreRegionCoprocessor() {}
  }

  /**
   * Annotated with CoreCoprocessor. This should make it so I CAN get at an instance of
   * RegionServerServices from the environment, after some gymnastics.
   */
  @CoreCoprocessor
  public static class CoreRegionCoprocessor implements RegionCoprocessor {
    public CoreRegionCoprocessor() {}
  }

  /**
   * Assert that when a Coprocessor is annotated with CoreCoprocessor, the environment it is
   * loaded with implements HasRegionServerServices so a RegionServerServices instance is
   * accessible; assert an unannotated Coprocessor gets a plain environment.
   * @throws IOException if coprocessor loading fails
   */
  @Test
  public void testCoreRegionCoprocessor() throws IOException {
    RegionCoprocessorHost rch = region.getCoprocessorHost();
    RegionCoprocessorEnvironment env =
        rch.load(null, NotCoreRegionCoprocessor.class.getName(), 0, HTU.getConfiguration());
    assertFalse(env instanceof HasRegionServerServices);
    env = rch.load(null, CoreRegionCoprocessor.class.getName(), 1, HTU.getConfiguration());
    assertTrue(env instanceof HasRegionServerServices);
    assertEquals(this.rss, ((HasRegionServerServices)env).getRegionServerServices());
  }
}

View File

@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.coprocessor;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MockRegionServerServices;
import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
 * Test that the CoreCoprocessor annotation works, giving access to facilities not usually
 * available to coprocessors: here, a RegionServerServices instance via the environment a
 * RegionServerCoprocessorHost hands out when loading an annotated RegionServerCoprocessor.
 */
@Category({CoprocessorTests.class, SmallTests.class})
public class TestCoreRegionServerCoprocessor {
  @Rule public TestName name = new TestName();
  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
      withLookingForStuckThread(true).build();
  private static final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
  // Mocked-up RegionServer facilities and the coprocessor host under test; built fresh per test.
  private RegionServerServices rss;
  private RegionServerCoprocessorHost rsch;

  @Before
  public void before() throws IOException {
    this.rss = new MockRegionServerServices(HTU.getConfiguration());
    this.rsch = new RegionServerCoprocessorHost(this.rss, HTU.getConfiguration());
  }

  @After
  public void after() throws IOException {
    this.rsch.preStop("Stopping", null);
  }

  /**
   * No annotation with CoreCoprocessor. This should make it so I can NOT get at an instance of
   * RegionServerServices from the environment, even after some gymnastics.
   */
  public static class NotCoreRegionServerCoprocessor implements RegionServerCoprocessor {
    public NotCoreRegionServerCoprocessor() {}
  }

  /**
   * Annotated with CoreCoprocessor. This should make it so I CAN get at an instance of
   * RegionServerServices from the environment, after some gymnastics.
   */
  @CoreCoprocessor
  public static class CoreRegionServerCoprocessor implements RegionServerCoprocessor {
    public CoreRegionServerCoprocessor() {}
  }

  /**
   * Assert that when a Coprocessor is annotated with CoreCoprocessor, the environment it is
   * loaded with implements HasRegionServerServices so a RegionServerServices instance is
   * accessible; assert an unannotated Coprocessor gets a plain environment.
   * NOTE(review): method name looks copy-pasted from the Region flavor of this test; consider
   * renaming to testCoreRegionServerCoprocessor.
   * @throws IOException if coprocessor loading fails
   */
  @Test
  public void testCoreRegionCoprocessor() throws IOException {
    RegionServerCoprocessorEnvironment env =
        rsch.load(null, NotCoreRegionServerCoprocessor.class.getName(), 0, HTU.getConfiguration());
    assertFalse(env instanceof HasRegionServerServices);
    env = rsch.load(null, CoreRegionServerCoprocessor.class.getName(), 1, HTU.getConfiguration());
    assertTrue(env instanceof HasRegionServerServices);
    assertEquals(this.rss, ((HasRegionServerServices)env).getRegionServerServices());
  }
}

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -74,7 +74,7 @@ public class TestOpenTableInCoprocessor {
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
final WALEdit edit, final Durability durability) throws IOException {
try (Table table = e.getEnvironment().getCoprocessorRegionServerServices().getConnection().
try (Table table = e.getEnvironment().getConnection().
getTable(otherTable)) {
table.put(put);
completed[0] = true;
@ -112,8 +112,7 @@ public class TestOpenTableInCoprocessor {
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
final WALEdit edit, final Durability durability) throws IOException {
try (Table table = e.getEnvironment().getCoprocessorRegionServerServices().
getConnection().getTable(otherTable, getPool())) {
try (Table table = e.getEnvironment().getConnection().getTable(otherTable, getPool())) {
Put p = new Put(new byte[]{'a'});
p.addColumn(family, null, new byte[]{'a'});
try {

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MockRegionServerServices;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;
@ -39,11 +40,13 @@ import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@Category({CoprocessorTests.class, SmallTests.class})
public class TestRegionObserverStacking extends TestCase {
@ -128,7 +131,8 @@ public class TestRegionObserverStacking extends TestCase {
// is secretly loaded at OpenRegionHandler. we don't really
// start a region server here, so just manually create cphost
// and set it to region.
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
RegionCoprocessorHost host = new RegionCoprocessorHost(r,
Mockito.mock(RegionServerServices.class), conf);
r.setCoprocessorHost(host);
return r;
}

View File

@ -162,4 +162,4 @@ public class TestRegionServerCoprocessorExceptionWithAbort {
}
}
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -23,6 +23,7 @@ import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.Server;
@ -454,4 +455,9 @@ public class MockNoopMasterServices implements MasterServices, Server {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -535,12 +535,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
return null;
}
@Override
public Set<TableName> getOnlineTables() {
// TODO Auto-generated method stub
return null;
}
@Override
public Leases getLeases() {
// TODO Auto-generated method stub

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -28,6 +28,7 @@ import java.util.concurrent.Semaphore;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -332,5 +333,15 @@ public class TestActiveMasterManager {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -199,7 +199,6 @@ public class TestHFileCleaner {
}
static class DummyServer implements Server {
@Override
public Configuration getConfiguration() {
return UTIL.getConfiguration();
@ -263,6 +262,16 @@ public class TestHFileCleaner {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
@Test

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -201,5 +201,15 @@ public class TestHFileLinkCleaner {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -309,6 +309,16 @@ public class TestLogsCleaner {
public ClusterConnection getClusterConnection() {
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
@ -320,6 +320,16 @@ public class TestReplicationHFileCleaner {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -27,6 +27,7 @@ import java.lang.management.ManagementFactory;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -897,6 +898,16 @@ public class TestHeapMemoryManager {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
static class CustomHeapMemoryTuner implements HeapMemoryTuner {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@ -168,6 +170,7 @@ public class TestRegionServerAbort {
assertFalse(cluster.getRegionServer(0).isStopped());
}
@CoreCoprocessor
public static class StopBlockingRegionObserver
implements RegionServerCoprocessor, RegionCoprocessor, RegionServerObserver, RegionObserver {
public static final String DO_ABORT = "DO_ABORT";
@ -187,9 +190,12 @@ public class TestRegionServerAbort {
public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
Durability durability) throws IOException {
if (put.getAttribute(DO_ABORT) != null) {
HRegionServer rs = (HRegionServer) c.getEnvironment().getCoprocessorRegionServerServices();
LOG.info("Triggering abort for regionserver " + rs.getServerName());
rs.abort("Aborting for test");
// TODO: Change this so it throws a CP Abort Exception instead.
RegionServerServices rss =
((HasRegionServerServices)c.getEnvironment()).getRegionServerServices();
String str = "Aborting for test";
LOG.info(str + " " + rss.getServerName());
rss.abort(str, new Throwable(str));
}
}

View File

@ -32,6 +32,7 @@ import java.util.concurrent.atomic.LongAdder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
@ -149,6 +150,16 @@ public class TestSplitLogWorker {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems)

View File

@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -560,6 +560,15 @@ public class TestWALLockup {
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
static class DummyWALActionsListener extends WALActionsListener.Base {

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.replication;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -474,5 +475,14 @@ public class TestReplicationStateHBaseImpl {
abortCount = 0;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -26,6 +26,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.CoordinatedStateManager;
@ -205,5 +206,15 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@ -29,6 +29,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.CoordinatedStateManager;
@ -302,5 +303,15 @@ public class TestReplicationTrackerZKImpl {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ClusterConnection;
@ -722,5 +721,15 @@ public abstract class TestReplicationSourceManager {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}
}

View File

@ -35,6 +35,7 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.CoordinatedStateManager;
@ -57,7 +58,6 @@ import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.ipc.SimpleRpcServer;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
import org.apache.hadoop.hbase.regionserver.CoprocessorRegionServerServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.security.SecurityInfo;
@ -238,6 +238,16 @@ public class TestTokenAuthentication {
return ServerName.valueOf(isa.getHostName(), isa.getPort(), startcode);
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return this.stopped;
}
@Override
public void abort(String reason, Throwable error) {
LOG.fatal("Aborting on: "+reason, error);
@ -268,10 +278,6 @@ public class TestTokenAuthentication {
@Override
public void shutdown() {}
public CoprocessorRegionServerServices getCoprocessorRegionServerServices() {
return mockServices;
}
@Override
public ConcurrentMap<String, Object> getSharedData() { return null; }
@ -307,6 +313,16 @@ public class TestTokenAuthentication {
public HRegionInfo getRegionInfo() {
return null;
}
@Override
public ServerName getServerName() {
return null;
}
@Override
public Connection getConnection() {
return null;
}
});
started = true;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -133,4 +134,14 @@ public class MockServer implements Server {
// TODO Auto-generated method stub
return null;
}
@Override
public FileSystem getFileSystem() {
return null;
}
@Override
public boolean isStopping() {
return false;
}
}

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@ -44,6 +45,7 @@ import java.util.Optional;
/**
* Simple test coprocessor for injecting exceptions on Get requests.
*/
@CoreCoprocessor
public class ErrorThrowingGetObserver implements RegionCoprocessor, RegionObserver {
@Override
public Optional<RegionObserver> getRegionObserver() {
@ -68,8 +70,7 @@ public class ErrorThrowingGetObserver implements RegionCoprocessor, RegionObserv
case NOT_SERVING_REGION:
throw new NotServingRegionException("Failing for test");
case REGION_MOVED:
throw new RegionMovedException(
e.getEnvironment().getCoprocessorRegionServerServices().getServerName(), 1);
throw new RegionMovedException(e.getEnvironment().getServerName(), 1);
case SCANNER_RESET:
throw new ScannerResetException("Failing for test");
case UNKNOWN_SCANNER: