diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java index 4131b4d34d3..e2b27ff537f 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.util.Pair; @@ -43,8 +45,14 @@ import org.apache.yetus.audience.InterfaceAudience; * An Observer to facilitate backup operations */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class BackupObserver implements RegionObserver { +public class BackupObserver implements RegionCoprocessor, RegionObserver { private static final Log LOG = LogFactory.getLog(BackupObserver.class); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public boolean postBulkLoadHFile(ObserverContext ctx, List> stagingFamilyPaths, Map> finalPaths, diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java index 843ed38559c..af8e90785f7 100644 --- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -67,9 +69,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{ POST_DELETE_SNAPSHOT_FAILURE } - public static class MasterSnapshotObserver implements MasterObserver { - - + public static class MasterSnapshotObserver implements MasterCoprocessor, MasterObserver { List failures = new ArrayList(); public void setFailures(Failure ... 
f) { @@ -79,6 +79,11 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{ } } + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) @@ -121,8 +126,8 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{ private MasterSnapshotObserver getMasterSnapshotObserver() { - return (MasterSnapshotObserver)TEST_UTIL.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSnapshotObserver.class.getName()); + return TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost() + .findCoprocessor(MasterSnapshotObserver.class); } @Test diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java index f253350936c..38fe74e2b23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -20,7 +20,9 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import java.util.Optional; +import com.google.protobuf.Service; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -53,9 +55,22 @@ public interface Coprocessor { STOPPED } - // Interface + /** + * Called by the {@link CoprocessorEnvironment} during it's own startup to initialize the + * coprocessor. + */ default void start(CoprocessorEnvironment env) throws IOException {} + /** + * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the + * coprocessor. + */ default void stop(CoprocessorEnvironment env) throws IOException {} + /** + * Coprocessor endpoints providing protobuf services should implement this interface. 
+ */ + default Optional getService() { + return Optional.empty(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index adc7386b792..aabf3b56b46 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.client.Table; * Coprocessor environment state. */ @InterfaceAudience.Private -public interface CoprocessorEnvironment { +public interface CoprocessorEnvironment { /** @return the Coprocessor interface version */ int getVersion(); @@ -39,7 +39,7 @@ public interface CoprocessorEnvironment { String getHBaseVersion(); /** @return the loaded coprocessor instance */ - Coprocessor getInstance(); + C getInstance(); /** @return the priority assigned to the loaded coprocessor */ int getPriority(); @@ -67,4 +67,13 @@ public interface CoprocessorEnvironment { * @return the classloader for the loaded coprocessor instance */ ClassLoader getClassLoader(); + + /** + * After a coprocessor has been loaded in an encapsulation of an environment, CoprocessorHost + * calls this function to initialize the environment. + */ + void startup() throws IOException; + + /** Clean up the environment. Called by CoprocessorHost when it itself is shutting down. 
*/ + void shutdown(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index e1f274c3afb..c62ab1d7000 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -681,8 +681,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable -extends AggregateService implements CoprocessorService, Coprocessor { +public class AggregateImplementation +extends AggregateService implements RegionCoprocessor { protected static final Log log = LogFactory.getLog(AggregateImplementation.class); private RegionCoprocessorEnvironment env; @@ -156,7 +156,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { results.clear(); } while (hasMoreRows); if (min != null) { - response = AggregateResponse.newBuilder().addFirstPart( + response = AggregateResponse.newBuilder().addFirstPart( ci.getProtoForCellType(min).toByteString()).build(); } } catch (IOException e) { @@ -211,7 +211,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { results.clear(); } while (hasMoreRows); if (sumVal != null) { - response = AggregateResponse.newBuilder().addFirstPart( + response = AggregateResponse.newBuilder().addFirstPart( ci.getProtoForPromotedType(sumVal).toByteString()).build(); } } catch (IOException e) { @@ -262,7 +262,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } while (hasMoreRows); ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter); bb.rewind(); - response = AggregateResponse.newBuilder().addFirstPart( + response = AggregateResponse.newBuilder().addFirstPart( ByteString.copyFrom(bb)).build(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -310,7 +310,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } List results = new 
ArrayList<>(); boolean hasMoreRows = false; - + do { results.clear(); hasMoreRows = scanner.next(results); @@ -371,7 +371,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { List results = new ArrayList<>(); boolean hasMoreRows = false; - + do { tempVal = null; hasMoreRows = scanner.next(results); @@ -413,7 +413,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { * It is computed for the combination of column * family and column qualifier(s) in the given row range as defined in the * Scan object. In its current implementation, it takes one column family and - * two column qualifiers. The first qualifier is for values column and + * two column qualifiers. The first qualifier is for values column and * the second qualifier (optional) is for weight column. */ @Override @@ -437,7 +437,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { List results = new ArrayList<>(); boolean hasMoreRows = false; - + do { tempVal = null; tempWeight = null; @@ -461,7 +461,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString(); AggregateResponse.Builder pair = AggregateResponse.newBuilder(); pair.addFirstPart(first_sumVal); - pair.addFirstPart(first_sumWeights); + pair.addFirstPart(first_sumWeights); response = pair.build(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -500,8 +500,8 @@ extends AggregateService implements CoprocessorService, Coprocessor { } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } /** @@ -527,5 +527,5 @@ extends AggregateService implements CoprocessorService, Coprocessor { public void stop(CoprocessorEnvironment env) throws IOException { // nothing to do } - + } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index 8f84d9e058f..9b8901e3045 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,7 +36,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -87,8 +87,7 @@ import org.apache.hadoop.util.ReflectionUtils; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public class Export extends ExportProtos.ExportService - implements Coprocessor, CoprocessorService { +public class Export extends ExportProtos.ExportService implements RegionCoprocessor { private static final Log LOG = LogFactory.getLog(Export.class); private static final Class DEFAULT_CODEC = DefaultCodec.class; @@ -312,8 +311,8 @@ public class Export extends ExportProtos.ExportService } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index 3f8b3bbccf1..4286174458b 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -21,14 +21,13 @@ package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; +import org.apache.yetus.audience.InterfaceAudience; /** * Coprocessor service for bulk loads in secure mode. 
@@ -55,8 +55,7 @@ import com.google.protobuf.Service; */ @InterfaceAudience.Private @Deprecated -public class SecureBulkLoadEndpoint extends SecureBulkLoadService - implements CoprocessorService, Coprocessor { +public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements RegionCoprocessor { public static final long VERSION = 0L; @@ -176,7 +175,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index 9c03e6a7906..54f1f5396b7 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -21,12 +21,12 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.ColumnAggregationService; @@ -45,13 +45,13 @@ import com.google.protobuf.Service; * The aggregation implementation at a region. 
*/ public class ColumnAggregationEndpoint extends ColumnAggregationService -implements Coprocessor, CoprocessorService { +implements RegionCoprocessor { private static final Log LOG = LogFactory.getLog(ColumnAggregationEndpoint.class); private RegionCoprocessorEnvironment env = null; @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java index 54e33587ce3..43a0075b6ce 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; @@ -47,14 +47,12 @@ import com.google.protobuf.Service; * response values. 
*/ public class ColumnAggregationEndpointNullResponse - extends - ColumnAggregationServiceNullResponse -implements Coprocessor, CoprocessorService { + extends ColumnAggregationServiceNullResponse implements RegionCoprocessor { private static final Log LOG = LogFactory.getLog(ColumnAggregationEndpointNullResponse.class); private RegionCoprocessorEnvironment env = null; @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index 6e8c571d689..0faa717e4dd 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; @@ -48,14 +48,14 @@ import com.google.protobuf.Service; * coprocessor endpoints throwing exceptions. 
*/ public class ColumnAggregationEndpointWithErrors - extends - ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors -implements Coprocessor, CoprocessorService { + extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors + implements RegionCoprocessor { private static final Log LOG = LogFactory.getLog(ColumnAggregationEndpointWithErrors.class); private RegionCoprocessorEnvironment env = null; + @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override @@ -73,7 +73,7 @@ implements Coprocessor, CoprocessorService { } @Override - public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, + public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, RpcCallback done) { // aggregate at each region Scan scan = new Scan(); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index 5b7c1e9144e..bc8d3e9c4d3 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -34,6 +33,7 @@ import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; import org.apache.hadoop.hbase.util.Threads; import java.io.IOException; +import java.util.Optional; /** * Test implementation of a coprocessor endpoint exposing the @@ -41,13 
+41,12 @@ import java.io.IOException; * only. */ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto - implements CoprocessorService, Coprocessor { - public ProtobufCoprocessorService() { - } + implements MasterCoprocessor, RegionCoprocessor { + public ProtobufCoprocessorService() {} @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java index 16fb03c378d..84c777ced6d 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java @@ -23,28 +23,21 @@ import static org.junit.Assert.fail; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.Optional; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RetriesExhaustedException; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TestAsyncAdminBase; -import org.apache.hadoop.hbase.coprocessor.TestRegionServerCoprocessorEndpoint.DummyRegionServerEndpoint; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyResponse; import 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException; -import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.BeforeClass; @@ -56,7 +49,6 @@ import org.junit.runners.Parameterized; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; @RunWith(Parameterized.class) @Category({ ClientTests.class, MediumTests.class }) @@ -133,14 +125,14 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { } } - static class DummyRegionServerEndpoint extends DummyService implements Coprocessor, SingletonCoprocessorService { + public static class DummyRegionServerEndpoint extends DummyService + implements RegionServerCoprocessor { - public DummyRegionServerEndpoint() { - } + public DummyRegionServerEndpoint() {} @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 56fdca6db40..9067c88cb62 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -54,7 +54,12 @@ public class TestClassLoading { private 
static final Log LOG = LogFactory.getLog(TestClassLoading.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - public static class TestMasterCoprocessor implements MasterObserver {} + public static class TestMasterCoprocessor implements MasterCoprocessor, MasterObserver { + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + } private static MiniDFSCluster cluster; @@ -69,7 +74,7 @@ public class TestClassLoading { private static Class regionCoprocessor1 = ColumnAggregationEndpoint.class; // TOOD: Fix the import of this handler. It is coming in from a package that is far away. private static Class regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class; - private static Class regionServerCoprocessor = SampleRegionWALObserver.class; + private static Class regionServerCoprocessor = SampleRegionWALCoprocessor.class; private static Class masterCoprocessor = TestMasterCoprocessor.class; private static final String[] regionServerSystemCoprocessors = @@ -110,8 +115,9 @@ public class TestClassLoading { } static File buildCoprocessorJar(String className) throws Exception { - String code = "import org.apache.hadoop.hbase.coprocessor.*;" + - "public class " + className + " implements RegionObserver {}"; + String code = + "import org.apache.hadoop.hbase.coprocessor.*;" + + "public class " + className + " implements RegionCoprocessor {}"; return ClassLoaderTestHelper.buildJar( TEST_UTIL.getDataTestDir().toString(), className, code); } @@ -539,19 +545,6 @@ public class TestClassLoading { assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors); } - @Test - public void testFindCoprocessors() { - // HBASE 12277: - CoprocessorHost masterCpHost = - TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost(); - - List masterObservers = masterCpHost.findCoprocessors(MasterObserver.class); - - assertTrue(masterObservers != null && masterObservers.size() > 0); - 
assertEquals(masterCoprocessor.getSimpleName(), - masterObservers.get(0).getClass().getSimpleName()); - } - private void waitForTable(TableName name) throws InterruptedException, IOException { // First wait until all regions are online TEST_UTIL.waitTableEnabled(name); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java new file mode 100644 index 00000000000..c2ff36e0bed --- /dev/null +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hbase.coprocessor; + +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.*; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +/** + * Tests to ensure that 2.0 is backward compatible in loading CoprocessorService. + */ +@Category({SmallTests.class}) +public class TestCoprocessorServiceBackwardCompatibility { + private static HBaseTestingUtility TEST_UTIL = null; + private static Configuration CONF = null; + + public static class DummyCoprocessorService extends DummyService + implements CoprocessorService, SingletonCoprocessorService { + static int numCalls = 0; + + @Override + public Service getService() { + return this; + } + + @Override + public void dummyCall(RpcController controller, DummyRequest request, + RpcCallback callback) { + callback.run(DummyResponse.newBuilder().setValue("").build()); + numCalls++; + } + + @Override + public void dummyThrow(RpcController controller, DummyRequest request, + RpcCallback callback) { + } + } + + @BeforeClass + public static void setupBeforeClass() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + CONF = TEST_UTIL.getConfiguration(); + DummyCoprocessorService.numCalls = 0; + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testCoprocessorServiceLoadedByMaster() throws Exception { + CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + 
DummyCoprocessorService.class.getName()); + TEST_UTIL.startMiniCluster(); + + TEST_UTIL.getAdmin().coprocessorService().callBlockingMethod( + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, + DummyRequest.getDefaultInstance(), DummyResponse.getDefaultInstance()); + + assertEquals(1, DummyCoprocessorService.numCalls); + } + + @Test + public void testCoprocessorServiceLoadedByRegionServer() throws Exception { + CONF.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, + DummyCoprocessorService.class.getName()); + TEST_UTIL.startMiniCluster(); + TEST_UTIL.getAdmin().coprocessorService( + TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName()).callBlockingMethod( + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, + DummyRequest.getDefaultInstance(), DummyResponse.getDefaultInstance()); + assertEquals(1, DummyCoprocessorService.numCalls); + } + + @Test + public void testCoprocessorServiceLoadedByRegion() throws Throwable { + CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + DummyCoprocessorService.class.getName()); + TEST_UTIL.startMiniCluster(); + TEST_UTIL.getConnection().getTable(TableName.valueOf("hbase:meta")).batchCoprocessorService( + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), + DummyRequest.getDefaultInstance(), Bytes.toBytes(""), Bytes.toBytes(""), + DummyResponse.getDefaultInstance()); + assertEquals(1, DummyCoprocessorService.numCalls); + } +} diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java index 9dc4822168b..2e22a162df8 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java @@ -22,9 +22,9 @@ 
import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.ServerName; @@ -102,21 +102,12 @@ public class TestRegionServerCoprocessorEndpoint { ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim()); } - static class DummyRegionServerEndpoint extends DummyService implements Coprocessor, SingletonCoprocessorService { + public static class DummyRegionServerEndpoint extends DummyService + implements RegionServerCoprocessor { @Override - public Service getService() { - return this; - } - - @Override - public void start(CoprocessorEnvironment env) throws IOException { - // TODO Auto-generated method stub - } - - @Override - public void stop(CoprocessorEnvironment env) throws IOException { - // TODO Auto-generated method stub + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java index c3f711980d8..8c111926770 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java @@ -310,7 +310,7 @@ public class TestRowProcessorEndpoint { * So they can be loaded with the endpoint on the coprocessor. 
*/ public static class RowProcessorEndpoint - extends BaseRowProcessorEndpoint implements CoprocessorService { + extends BaseRowProcessorEndpoint { public static class IncrementCounterProcessor extends BaseRowProcessor { @@ -417,7 +417,7 @@ public class TestRowProcessorEndpoint { @Override public FriendsOfFriendsProcessorResponse getResult() { - FriendsOfFriendsProcessorResponse.Builder builder = + FriendsOfFriendsProcessorResponse.Builder builder = FriendsOfFriendsProcessorResponse.newBuilder(); builder.addAllResult(result); return builder.build(); @@ -469,7 +469,7 @@ public class TestRowProcessorEndpoint { } @Override - public void initialize(FriendsOfFriendsProcessorRequest request) + public void initialize(FriendsOfFriendsProcessorRequest request) throws IOException { this.person = request.getPerson().toByteArray(); this.row = request.getRow().toByteArray(); @@ -546,7 +546,7 @@ public class TestRowProcessorEndpoint { // Delete from the current row and add to the other row Delete d = new Delete(rows[i]); KeyValue kvDelete = - new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv.getTimestamp(), KeyValue.Type.Delete); d.add(kvDelete); Put p = new Put(rows[1 - i]); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index 83c7dbfded5..90cf10c96ea 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -24,10 +24,10 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Map; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import 
org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountRequest; @@ -75,8 +75,7 @@ public class TestServerCustomProtocol { static final String HELLO = "Hello, "; /* Test protocol implementation */ - public static class PingHandler extends PingProtos.PingService - implements Coprocessor, CoprocessorService { + public static class PingHandler extends PingProtos.PingService implements RegionCoprocessor { private int counter = 0; @Override @@ -125,8 +124,8 @@ public class TestServerCustomProtocol { } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } } @@ -320,7 +319,7 @@ public class TestServerCustomProtocol { // rows from 1 region assertEquals(1, results.size()); verifyRegionResults(locator, results, ROW_A); - + final String name = "NAME"; results = hello(table, name, null, ROW_A); // Should have gotten results for 1 of the three regions only since we specified @@ -343,12 +342,12 @@ public class TestServerCustomProtocol { // test,,1355943549657.c65d4822d8bdecc033a96451f3a0f55d. // test,bbb,1355943549661.110393b070dd1ed93441e0bc9b3ffb7e. // test,ccc,1355943549665.c3d6d125141359cbbd2a43eaff3cdf74. - + Map results = ping(table, null, ROW_A); // Should contain first region only. 
assertEquals(1, results.size()); verifyRegionResults(locator, results, ROW_A); - + // Test start row + empty end results = ping(table, ROW_BC, null); assertEquals(2, results.size()); @@ -358,7 +357,7 @@ public class TestServerCustomProtocol { results.get(loc.getRegionInfo().getRegionName())); verifyRegionResults(locator, results, ROW_B); verifyRegionResults(locator, results, ROW_C); - + // test empty start + end results = ping(table, null, ROW_BC); // should contain the first 2 regions @@ -368,7 +367,7 @@ public class TestServerCustomProtocol { loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", results.get(loc.getRegionInfo().getRegionName())); - + // test explicit start + end results = ping(table, ROW_AB, ROW_BC); // should contain first 2 regions @@ -378,7 +377,7 @@ public class TestServerCustomProtocol { loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", results.get(loc.getRegionInfo().getRegionName())); - + // test single region results = ping(table, ROW_B, ROW_BC); // should only contain region bbb diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java index 79ff25b9908..5001e043159 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.TreeSet; @@ -28,7 +29,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import 
org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest; import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; @@ -57,7 +57,7 @@ import com.google.protobuf.Service; /** * Defines a protocol to delete data in bulk based on a scan. The scan can be range scan or with - * conditions(filters) etc.This can be used to delete rows, column family(s), column qualifier(s) + * conditions(filters) etc.This can be used to delete rows, column family(s), column qualifier(s) * or version(s) of columns.When delete type is FAMILY or COLUMN, which all family(s) or column(s) * getting deleted will be determined by the Scan. Scan need to select all the families/qualifiers * which need to be deleted.When delete type is VERSION, Which column(s) and version(s) to be @@ -65,16 +65,16 @@ import com.google.protobuf.Service; * which needs to be deleted.When a timestamp is passed only one version at that timestamp will be * deleted(even if Scan fetches many versions). When timestamp passed as null, all the versions * which the Scan selects will get deleted. - * + * *
Example:

  * Scan scan = new Scan();
  * // set scan properties(rowkey range, filters, timerange etc).
  * HTable ht = ...;
  * long noOfDeletedRows = 0L;
- * Batch.Call<BulkDeleteService, BulkDeleteResponse> callable = 
+ * Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
  *     new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
  *   ServerRpcController controller = new ServerRpcController();
- *   BlockingRpcCallback<BulkDeleteResponse> rpcCallback = 
+ *   BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
  *     new BlockingRpcCallback<BulkDeleteResponse>();
  *
  *   public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
@@ -95,16 +95,15 @@ import com.google.protobuf.Service;
  * }
  * 
*/ -public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService, - Coprocessor { +public class BulkDeleteEndpoint extends BulkDeleteService implements RegionCoprocessor { private static final String NO_OF_VERSIONS_TO_DELETE = "noOfVersionsToDelete"; private static final Log LOG = LogFactory.getLog(BulkDeleteEndpoint.class); private RegionCoprocessorEnvironment env; @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java index a93935d45c6..c27672cf9d3 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.coprocessor.example; import java.io.IOException; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -26,6 +27,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -45,7 +47,11 @@ import org.apache.hadoop.hbase.metrics.Timer; *

* @see ExampleRegionObserverWithMetrics */ -public class ExampleMasterObserverWithMetrics implements MasterObserver { +public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, MasterObserver { + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } private static final Log LOG = LogFactory.getLog(ExampleMasterObserverWithMetrics.class); @@ -133,4 +139,4 @@ public class ExampleMasterObserverWithMetrics implements MasterObserver { registry.register("maxMemory", this::getMaxMemory); } } -} +} \ No newline at end of file diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleRegionObserverWithMetrics.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleRegionObserverWithMetrics.java index fd593a7edf3..f03b91513eb 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleRegionObserverWithMetrics.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleRegionObserverWithMetrics.java @@ -22,12 +22,14 @@ package org.apache.hadoop.hbase.coprocessor.example; import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.concurrent.ThreadLocalRandom; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.metrics.Counter; @@ -45,36 +47,49 @@ import org.apache.hadoop.hbase.metrics.Timer; * * @see ExampleMasterObserverWithMetrics */ -public class ExampleRegionObserverWithMetrics implements RegionObserver { +public class ExampleRegionObserverWithMetrics implements RegionCoprocessor { private Counter 
preGetCounter; private Timer costlyOperationTimer; + private ExampleRegionObserver observer; - @Override - public void preGetOp(ObserverContext e, Get get, List results) - throws IOException { - // Increment the Counter whenever the coprocessor is called - preGetCounter.increment(); - } + class ExampleRegionObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } - @Override - public void postGetOp(ObserverContext e, Get get, - List results) throws IOException { - // do a costly (high latency) operation which we want to measure how long it takes by - // using a Timer (which is a Meter and a Histogram). - long start = System.nanoTime(); - try { - performCostlyOperation(); - } finally { - costlyOperationTimer.updateNanos(System.nanoTime() - start); + @Override + public void preGetOp(ObserverContext e, Get get, + List results) throws IOException { + // Increment the Counter whenever the coprocessor is called + preGetCounter.increment(); + } + + @Override + public void postGetOp(ObserverContext e, Get get, + List results) throws IOException { + // do a costly (high latency) operation which we want to measure how long it takes by + // using a Timer (which is a Meter and a Histogram). + long start = System.nanoTime(); + try { + performCostlyOperation(); + } finally { + costlyOperationTimer.updateNanos(System.nanoTime() - start); + } + } + + private void performCostlyOperation() { + try { + // simulate the operation by sleeping. + Thread.sleep(ThreadLocalRandom.current().nextLong(100)); + } catch (InterruptedException ignore) { + } } } - private void performCostlyOperation() { - try { - // simulate the operation by sleeping. 
- Thread.sleep(ThreadLocalRandom.current().nextLong(100)); - } catch (InterruptedException ignore) {} + @Override public Optional getRegionObserver() { + return Optional.of(observer); } @Override @@ -88,6 +103,7 @@ public class ExampleRegionObserverWithMetrics implements RegionObserver { // at the region server level per-regionserver. MetricRegistry registry = ((RegionCoprocessorEnvironment) env).getMetricRegistryForRegionServer(); + observer = new ExampleRegionObserver(); if (preGetCounter == null) { // Create a new Counter, or get the already registered counter. diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java index 5b974112a4a..4709d55e707 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RefreshHFilesEndpoint.java @@ -23,16 +23,16 @@ import com.google.protobuf.RpcController; import com.google.protobuf.Service; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos; import org.apache.hadoop.hbase.regionserver.Store; import java.io.IOException; +import java.util.Optional; /** * Coprocessor endpoint to refresh HFiles on replica. @@ -43,7 +43,7 @@ import java.io.IOException; *

*/ public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesService - implements Coprocessor, CoprocessorService { + implements RegionCoprocessor { protected static final Log LOG = LogFactory.getLog(RefreshHFilesEndpoint.class); private RegionCoprocessorEnvironment env; @@ -51,8 +51,8 @@ public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesServ } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java index 598008bb716..7e7532400f6 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java @@ -21,14 +21,14 @@ package org.apache.hadoop.hbase.coprocessor.example; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; @@ -48,8 +48,7 @@ import com.google.protobuf.Service; * hbase-examples/src/main/protobuf/Examples.proto. *

*/ -public class RowCountEndpoint extends ExampleProtos.RowCountService - implements Coprocessor, CoprocessorService { +public class RowCountEndpoint extends ExampleProtos.RowCountService implements RegionCoprocessor { private RegionCoprocessorEnvironment env; public RowCountEndpoint() { @@ -59,8 +58,8 @@ public class RowCountEndpoint extends ExampleProtos.RowCountService * Just returns a reference to this object, which implements the RowCounterService interface. */ @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } /** diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java index 7b8fdf34f6a..733a0036fd2 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.coprocessor.example; import java.io.IOException; import java.util.List; import java.util.NavigableSet; +import java.util.Optional; import java.util.OptionalInt; import org.apache.commons.logging.Log; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.HStore; @@ -51,20 +53,25 @@ import org.apache.zookeeper.ZooKeeper; * This is an example showing how a RegionObserver could configured * via ZooKeeper in order to control a Region compaction, flush, and scan 
policy. * - * This also demonstrated the use of shared + * This also demonstrates the use of shared * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state. * See {@link RegionCoprocessorEnvironment#getSharedData()}. * * This would be useful for an incremental backup tool, which would indicate the last * time of a successful backup via ZK and instruct HBase to not delete data that was - * inserted since (based on wall clock time). + * inserted since (based on wall clock time). * * This implements org.apache.zookeeper.Watcher directly instead of using - * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher}, + * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher}, * because RegionObservers come and go and currently * listeners registered with ZooKeeperWatcher cannot be removed. */ -public class ZooKeeperScanPolicyObserver implements RegionObserver { +public class ZooKeeperScanPolicyObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + // The zk ensemble info is put in hbase config xml with given custom key.
public static final String ZK_ENSEMBLE_KEY = "ZooKeeperScanPolicyObserver.zookeeper.ensemble"; public static final String ZK_SESSION_TIMEOUT_KEY = @@ -243,4 +250,4 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver { return new StoreScanner((HStore) store, scanInfo, scan, targetCols, ((HStore) store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED)); } -} +} \ No newline at end of file diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java index e95ed7c227e..4eb5e41848e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; @@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -155,12 +157,18 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { private boolean load = false; private boolean check = false; - public static class SlowMeCoproScanOperations implements RegionObserver { + public static class SlowMeCoproScanOperations implements RegionCoprocessor, RegionObserver { static final AtomicLong sleepTime = new AtomicLong(2000); Random r = new Random(); AtomicLong countOfNext = new AtomicLong(0); AtomicLong 
countOfOpen = new AtomicLong(0); public SlowMeCoproScanOperations() {} + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public RegionScanner preScannerOpen(final ObserverContext e, final Scan scan, final RegionScanner s) throws IOException { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 03a50e6462f..18a745cd9df 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.UUID; import org.apache.commons.logging.Log; @@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; @@ -247,7 +249,13 @@ public class TestImportTSVWithOperationAttributes implements Configurable { assertTrue(verified); } - public static class OperationAttributesTestController implements RegionObserver { + public static class OperationAttributesTestController + implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public void prePut(ObserverContext e, Put put, WALEdit edit, diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index 3a7380bbeab..a1b409759c2 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.UUID; import org.apache.commons.logging.Log; @@ -34,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; @@ -156,7 +158,12 @@ public class TestImportTSVWithTTLs implements Configurable { return tool; } - public static class TTLCheckingObserver implements RegionObserver { + public static class TTLCheckingObserver implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public void prePut(ObserverContext e, Put put, WALEdit edit, diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 657dbbabab7..afad353e9a7 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup; import java.io.IOException; import 
java.util.HashSet; +import java.util.Optional; import java.util.Set; import com.google.protobuf.RpcCallback; @@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -69,8 +70,9 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; import org.apache.yetus.audience.InterfaceAudience; +// TODO: Encapsulate MasterObserver functions into separate subclass. @InterfaceAudience.Private -public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService { +public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { private static final Log LOG = LogFactory.getLog(RSGroupAdminEndpoint.class); private MasterServices master = null; @@ -93,8 +95,13 @@ public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService } @Override - public Service getService() { - return groupAdminService; + public Optional getService() { + return Optional.of(groupAdminService); + } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); } RSGroupInfoManager getGroupInfoManager() { @@ -106,12 +113,6 @@ public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService * This class calls {@link RSGroupAdminServer} for actual work, converts result to protocol * buffer response, handles exceptions if any occurred and then calls the {@code RpcCallback} with * the response. 
- * Since our CoprocessorHost asks the Coprocessor for a Service - * ({@link CoprocessorService#getService()}) instead of doing "coproc instanceOf Service" - * and requiring Coprocessor itself to be Service (something we do with our observers), - * we can use composition instead of inheritance here. That makes it easy to manage - * functionalities in concise classes (sometimes inner classes) instead of single class doing - * many different things. */ private class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService { @Override diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java index c58dc9db246..a9493cad4b6 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java @@ -97,8 +97,8 @@ public class TestRSGroups extends TestRSGroupsBase { admin.setBalancerRunning(false,true); rsGroupAdmin = new VerifyingRSGroupAdminClient( new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration()); - rsGroupAdminEndpoint = - master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0); + rsGroupAdminEndpoint = (RSGroupAdminEndpoint) + master.getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName()); } @AfterClass diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java index f3040af197c..025bb2482bd 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -161,7 +161,7 @@ public class TestRSGroupsOfflineMode { // Get groupInfoManager from the new active master. 
RSGroupInfoManager groupMgr = ((MiniHBaseCluster)cluster).getMaster().getMasterCoprocessorHost() - .findCoprocessors(RSGroupAdminEndpoint.class).get(0).getGroupInfoManager(); + .findCoprocessor(RSGroupAdminEndpoint.class).getGroupInfoManager(); // Make sure balancer is in offline mode, since this is what we're testing. assertFalse(groupMgr.isOnline()); // Verify the group affiliation that's loaded from ZK instead of tables. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java index 788d25bae6e..14720570271 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase; +import com.google.protobuf.Service; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -32,6 +33,7 @@ import java.rmi.server.RMIClientSocketFactory; import java.rmi.server.RMIServerSocketFactory; import java.rmi.server.UnicastRemoteObject; import java.util.HashMap; +import java.util.Optional; import javax.management.MBeanServer; import javax.management.remote.JMXConnectorServer; @@ -46,8 +48,7 @@ import javax.management.remote.rmi.RMIConnectorServer; * 2)support password authentication * 3)support subset of SSL (with default configuration) */ -public class JMXListener implements Coprocessor { - +public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor { private static final Log LOG = LogFactory.getLog(JMXListener.class); public static final String RMI_REGISTRY_PORT_CONF_KEY = ".rmi.registry.port"; public static final String RMI_CONNECTOR_PORT_CONF_KEY = ".rmi.connector.port"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 
a7ccb18eae0..ef09b5b950f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -38,7 +38,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost.Environment; +import org.apache.hadoop.hbase.coprocessor.BaseEnvironment; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.io.MultipleIOException; @@ -70,7 +70,7 @@ public final class HTableWrapper implements Table { * @throws IOException */ public static Table createWrapper(List openTables, - TableName tableName, Environment env, ExecutorService pool) throws IOException { + TableName tableName, BaseEnvironment env, ExecutorService pool) throws IOException { return new HTableWrapper(openTables, tableName, CoprocessorHConnection.getConnectionForEnvironment(env), pool); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index f01034c15ff..582fabf4eb4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.constraint; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Durability; import 
org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.InternalScanner; @@ -42,7 +44,7 @@ import org.apache.hadoop.hbase.wal.WALEdit; * implemented on any given system by a coprocessor. */ @InterfaceAudience.Private -public class ConstraintProcessor implements RegionObserver { +public class ConstraintProcessor implements RegionCoprocessor, RegionObserver { private static final Log LOG = LogFactory.getLog(ConstraintProcessor.class); @@ -50,6 +52,11 @@ public class ConstraintProcessor implements RegionObserver { private List constraints = new ArrayList<>(); + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + /** * Create the constraint processor. *

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java new file mode 100644 index 00000000000..a491d609a19 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.java @@ -0,0 +1,187 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HTableWrapper; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.yetus.audience.InterfaceAudience; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; + +/** + * Encapsulation of the environment of each coprocessor + */ +@InterfaceAudience.Private +public class BaseEnvironment implements CoprocessorEnvironment { + private static final Log LOG = LogFactory.getLog(BaseEnvironment.class); + + /** The coprocessor */ + public C impl; + /** Chaining priority */ + protected int priority = Coprocessor.PRIORITY_USER; + /** Current coprocessor state */ + Coprocessor.State state = Coprocessor.State.UNINSTALLED; + /** Accounting for tables opened by the coprocessor */ + protected List

openTables = + Collections.synchronizedList(new ArrayList
()); + private int seq; + private Configuration conf; + private ClassLoader classLoader; + + /** + * Constructor + * @param impl the coprocessor instance + * @param priority chaining priority + */ + public BaseEnvironment(final C impl, final int priority, + final int seq, final Configuration conf) { + this.impl = impl; + this.classLoader = impl.getClass().getClassLoader(); + this.priority = priority; + this.state = Coprocessor.State.INSTALLED; + this.seq = seq; + this.conf = conf; + } + + /** Initialize the environment */ + @Override + public void startup() throws IOException { + if (state == Coprocessor.State.INSTALLED || + state == Coprocessor.State.STOPPED) { + state = Coprocessor.State.STARTING; + Thread currentThread = Thread.currentThread(); + ClassLoader hostClassLoader = currentThread.getContextClassLoader(); + try { + currentThread.setContextClassLoader(this.getClassLoader()); + impl.start(this); + state = Coprocessor.State.ACTIVE; + } finally { + currentThread.setContextClassLoader(hostClassLoader); + } + } else { + LOG.warn("Not starting coprocessor " + impl.getClass().getName() + + " because not inactive (state=" + state.toString() + ")"); + } + } + + /** Clean up the environment */ + @Override + public void shutdown() { + if (state == Coprocessor.State.ACTIVE) { + state = Coprocessor.State.STOPPING; + Thread currentThread = Thread.currentThread(); + ClassLoader hostClassLoader = currentThread.getContextClassLoader(); + try { + currentThread.setContextClassLoader(this.getClassLoader()); + impl.stop(this); + state = Coprocessor.State.STOPPED; + } catch (IOException ioe) { + LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe); + } finally { + currentThread.setContextClassLoader(hostClassLoader); + } + } else { + LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+ + " because not active (state="+state.toString()+")"); + } + synchronized (openTables) { + // clean up any table references + for (Table table: openTables) { + try 
{ + ((HTableWrapper)table).internalClose(); + } catch (IOException e) { + // nothing can be done here + LOG.warn("Failed to close " + + table.getName(), e); + } + } + } + } + + @Override + public C getInstance() { + return impl; + } + + @Override + public ClassLoader getClassLoader() { + return classLoader; + } + + @Override + public int getPriority() { + return priority; + } + + @Override + public int getLoadSequence() { + return seq; + } + + /** @return the coprocessor environment version */ + @Override + public int getVersion() { + return Coprocessor.VERSION; + } + + /** @return the HBase release */ + @Override + public String getHBaseVersion() { + return VersionInfo.getVersion(); + } + + @Override + public Configuration getConfiguration() { + return conf; + } + + /** + * Open a table from within the Coprocessor environment + * @param tableName the table name + * @return an interface for manipulating the table + * @exception IOException Exception + */ + @Override + public Table getTable(TableName tableName) throws IOException { + return this.getTable(tableName, null); + } + + /** + * Open a table from within the Coprocessor environment + * @param tableName the table name + * @return an interface for manipulating the table + * @exception IOException Exception + */ + @Override + public Table getTable(TableName tableName, ExecutorService pool) throws IOException { + return HTableWrapper.createWrapper(openTables, tableName, this, pool); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java index 5886715746a..df3ed233452 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java @@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; 
import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -46,19 +46,19 @@ import com.google.protobuf.Service; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public abstract class BaseRowProcessorEndpoint -extends RowProcessorService implements CoprocessorService, Coprocessor { +public abstract class BaseRowProcessorEndpoint +extends RowProcessorService implements RegionCoprocessor { private RegionCoprocessorEnvironment env; /** * Pass a processor to region to process multiple rows atomically. - * + * * The RowProcessor implementations should be the inner classes of your * RowProcessorEndpoint. This way the RowProcessor can be class-loaded with * the Coprocessor endpoint together. * * See {@code TestRowProcessorEndpoint} for example. * - * The request contains information for constructing processor + * The request contains information for constructing processor * (see {@link #constructRowProcessorFromRequest}. The processor object defines * the read-modify-write procedure. 
*/ @@ -83,8 +83,8 @@ extends RowProcessorService implements CoprocessorService, Coprocessor { } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java index e891cc0fda9..25e6522018e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBul */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface BulkLoadObserver extends Coprocessor { +public interface BulkLoadObserver { /** * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. * It can't bypass the default action, e.g., ctx.bypass() won't have effect. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 27ac33a96b6..da07c40a698 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -25,12 +25,13 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -44,22 +45,22 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTableWrapper; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hbase.util.SortedList; -import org.apache.hadoop.hbase.util.VersionInfo; /** * Provides the common setup framework and runtime services for coprocessor * invocation from HBase services. - * @param the specific environment extension that a concrete implementation + * @param type of specific coprocessor this host will handle + * @param type of specific coprocessor environment this host requires. 
* provides */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public abstract class CoprocessorHost { +public abstract class CoprocessorHost> { public static final String REGION_COPROCESSOR_CONF_KEY = "hbase.coprocessor.region.classes"; public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = @@ -81,7 +82,8 @@ public abstract class CoprocessorHost { private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); protected Abortable abortable; /** Ordered set of loaded coprocessors with lock */ - protected SortedList coprocessors = new SortedList<>(new EnvironmentPriorityComparator()); + protected final SortedList coprocEnvironments = + new SortedList<>(new EnvironmentPriorityComparator()); protected Configuration conf; // unique file prefix to use for local copies of jars when classloading protected String pathPrefix; @@ -96,7 +98,7 @@ public abstract class CoprocessorHost { * Not to be confused with the per-object _coprocessors_ (above), * coprocessorNames is static and stores the set of all coprocessors ever * loaded by any thread in this JVM. It is strictly additive: coprocessors are - * added to coprocessorNames, by loadInstance() but are never removed, since + * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since * the intention is to preserve a history of all loaded coprocessors for * diagnosis in case of server crash (HBASE-4014). 
*/ @@ -118,7 +120,7 @@ public abstract class CoprocessorHost { */ public Set getCoprocessors() { Set returnValue = new TreeSet<>(); - for (CoprocessorEnvironment e: coprocessors) { + for (E e: coprocEnvironments) { returnValue.add(e.getInstance().getClass().getSimpleName()); } return returnValue; @@ -135,7 +137,7 @@ public abstract class CoprocessorHost { return; } - Class implClass = null; + Class implClass; // load default coprocessors from configure file String[] defaultCPClasses = conf.getStrings(confKey); @@ -156,10 +158,13 @@ public abstract class CoprocessorHost { implClass = cl.loadClass(className); // Add coprocessors as we go to guard against case where a coprocessor is specified twice // in the configuration - this.coprocessors.add(loadInstance(implClass, priority, conf)); - LOG.info("System coprocessor " + className + " was loaded " + - "successfully with priority (" + priority + ")."); - ++priority; + E env = checkAndLoadInstance(implClass, priority, conf); + if (env != null) { + this.coprocEnvironments.add(env); + LOG.info( + "System coprocessor " + className + " was loaded " + "successfully with priority (" + priority + ")."); + ++priority; + } } catch (Throwable t) { // We always abort if system coprocessors cannot be loaded abortServer(className, t); @@ -196,7 +201,7 @@ public abstract class CoprocessorHost { */ public E load(Path path, String className, int priority, Configuration conf, String[] includedClassPrefixes) throws IOException { - Class implClass = null; + Class implClass; LOG.debug("Loading coprocessor class " + className + " with path " + path + " and priority " + priority); @@ -223,7 +228,7 @@ public abstract class CoprocessorHost { try{ // switch temporarily to the thread classloader for custom CP currentThread.setContextClassLoader(cl); - E cpInstance = loadInstance(implClass, priority, conf); + E cpInstance = checkAndLoadInstance(implClass, priority, conf); return cpInstance; } finally { // restore the fresh (host) classloader @@ 
-231,16 +236,11 @@ public abstract class CoprocessorHost { } } - /** - * @param implClass Implementation class - * @param priority priority - * @param conf configuration - * @throws java.io.IOException Exception - */ - public void load(Class implClass, int priority, Configuration conf) + @VisibleForTesting + public void load(Class implClass, int priority, Configuration conf) throws IOException { - E env = loadInstance(implClass, priority, conf); - coprocessors.add(env); + E env = checkAndLoadInstance(implClass, priority, conf); + coprocEnvironments.add(env); } /** @@ -249,29 +249,22 @@ public abstract class CoprocessorHost { * @param conf configuration * @throws java.io.IOException Exception */ - public E loadInstance(Class implClass, int priority, Configuration conf) + public E checkAndLoadInstance(Class implClass, int priority, Configuration conf) throws IOException { - if (!Coprocessor.class.isAssignableFrom(implClass)) { - throw new IOException("Configured class " + implClass.getName() + " must implement " - + Coprocessor.class.getName() + " interface "); - } - // create the instance - Coprocessor impl; - Object o = null; + C impl; try { - o = implClass.newInstance(); - impl = (Coprocessor)o; - } catch (InstantiationException e) { - throw new IOException(e); - } catch (IllegalAccessException e) { + impl = checkAndGetInstance(implClass); + if (impl == null) { + LOG.error("Cannot load coprocessor " + implClass.getSimpleName()); + return null; + } + } catch (InstantiationException|IllegalAccessException e) { throw new IOException(e); } // create the environment - E env = createEnvironment(implClass, impl, priority, loadSequence.incrementAndGet(), conf); - if (env instanceof Environment) { - ((Environment)env).startup(); - } + E env = createEnvironment(impl, priority, loadSequence.incrementAndGet(), conf); + env.startup(); // HBASE-4014: maintain list of loaded coprocessors for later crash analysis // if server (master or regionserver) aborts. 
coprocessorNames.add(implClass.getName()); @@ -281,28 +274,30 @@ public abstract class CoprocessorHost { /** * Called when a new Coprocessor class is loaded */ - public abstract E createEnvironment(Class implClass, Coprocessor instance, - int priority, int sequence, Configuration conf); + public abstract E createEnvironment(C instance, int priority, int sequence, Configuration conf); - public void shutdown(CoprocessorEnvironment e) { - if (e instanceof Environment) { - if (LOG.isDebugEnabled()) { - LOG.debug("Stop coprocessor " + e.getInstance().getClass().getName()); - } - ((Environment)e).shutdown(); - } else { - LOG.warn("Shutdown called on unknown environment: "+ - e.getClass().getName()); + /** + * Called when a new Coprocessor class needs to be loaded. Checks if type of the given class + * is what the corresponding host implementation expects. If it is of correct type, returns an + * instance of the coprocessor to be loaded. If not, returns null. + * If an exception occurs when trying to create instance of a coprocessor, it's passed up and + * eventually results into server aborting. + */ + public abstract C checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException; + + public void shutdown(E e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Stop coprocessor " + e.getInstance().getClass().getName()); } + e.shutdown(); } /** - * Find a coprocessor implementation by class name - * @param className the class name - * @return the coprocessor, or null if not found + * Find coprocessors by full class name or simple name. 
*/ - public Coprocessor findCoprocessor(String className) { - for (E env: coprocessors) { + public C findCoprocessor(String className) { + for (E env: coprocEnvironments) { if (env.getInstance().getClass().getName().equals(className) || env.getInstance().getClass().getSimpleName().equals(className)) { return env.getInstance(); @@ -311,16 +306,26 @@ public abstract class CoprocessorHost { return null; } + @VisibleForTesting + public T findCoprocessor(Class cls) { + for (E env: coprocEnvironments) { + if (cls.isAssignableFrom(env.getInstance().getClass())) { + return (T) env.getInstance(); + } + } + return null; + } + /** * Find list of coprocessors that extend/implement the given class/interface * @param cls the class/interface to look for * @return the list of coprocessors, or null if not found */ - public List findCoprocessors(Class cls) { + public List findCoprocessors(Class cls) { ArrayList ret = new ArrayList<>(); - for (E env: coprocessors) { - Coprocessor cp = env.getInstance(); + for (E env: coprocEnvironments) { + C cp = env.getInstance(); if(cp != null) { if (cls.isAssignableFrom(cp.getClass())) { @@ -331,33 +336,14 @@ public abstract class CoprocessorHost { return ret; } - /** - * Find list of CoprocessorEnvironment that extend/implement the given class/interface - * @param cls the class/interface to look for - * @return the list of CoprocessorEnvironment, or null if not found - */ - public List findCoprocessorEnvironment(Class cls) { - ArrayList ret = new ArrayList<>(); - - for (E env: coprocessors) { - Coprocessor cp = env.getInstance(); - - if(cp != null) { - if (cls.isAssignableFrom(cp.getClass())) { - ret.add(env); - } - } - } - return ret; - } - /** * Find a coprocessor environment by class name * @param className the class name * @return the coprocessor, or null if not found */ - public CoprocessorEnvironment findCoprocessorEnvironment(String className) { - for (E env: coprocessors) { + @VisibleForTesting + public E 
findCoprocessorEnvironment(String className) { + for (E env: coprocEnvironments) { if (env.getInstance().getClass().getName().equals(className) || env.getInstance().getClass().getSimpleName().equals(className)) { return env; @@ -374,7 +360,7 @@ public abstract class CoprocessorHost { Set getExternalClassLoaders() { Set externalClassLoaders = new HashSet<>(); final ClassLoader systemClassLoader = this.getClass().getClassLoader(); - for (E env : coprocessors) { + for (E env : coprocEnvironments) { ClassLoader cl = env.getInstance().getClass().getClassLoader(); if (cl != systemClassLoader){ //do not include system classloader @@ -388,8 +374,7 @@ public abstract class CoprocessorHost { * Environment priority comparator. * Coprocessors are chained in sorted order. */ - static class EnvironmentPriorityComparator - implements Comparator { + static class EnvironmentPriorityComparator implements Comparator { @Override public int compare(final CoprocessorEnvironment env1, final CoprocessorEnvironment env2) { @@ -407,153 +392,7 @@ public abstract class CoprocessorHost { } } - /** - * Encapsulation of the environment of each coprocessor - */ - public static class Environment implements CoprocessorEnvironment { - - /** The coprocessor */ - public Coprocessor impl; - /** Chaining priority */ - protected int priority = Coprocessor.PRIORITY_USER; - /** Current coprocessor state */ - Coprocessor.State state = Coprocessor.State.UNINSTALLED; - /** Accounting for tables opened by the coprocessor */ - protected List
openTables = - Collections.synchronizedList(new ArrayList
()); - private int seq; - private Configuration conf; - private ClassLoader classLoader; - - /** - * Constructor - * @param impl the coprocessor instance - * @param priority chaining priority - */ - public Environment(final Coprocessor impl, final int priority, - final int seq, final Configuration conf) { - this.impl = impl; - this.classLoader = impl.getClass().getClassLoader(); - this.priority = priority; - this.state = Coprocessor.State.INSTALLED; - this.seq = seq; - this.conf = conf; - } - - /** Initialize the environment */ - public void startup() throws IOException { - if (state == Coprocessor.State.INSTALLED || - state == Coprocessor.State.STOPPED) { - state = Coprocessor.State.STARTING; - Thread currentThread = Thread.currentThread(); - ClassLoader hostClassLoader = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(this.getClassLoader()); - impl.start(this); - state = Coprocessor.State.ACTIVE; - } finally { - currentThread.setContextClassLoader(hostClassLoader); - } - } else { - LOG.warn("Not starting coprocessor "+impl.getClass().getName()+ - " because not inactive (state="+state.toString()+")"); - } - } - - /** Clean up the environment */ - protected void shutdown() { - if (state == Coprocessor.State.ACTIVE) { - state = Coprocessor.State.STOPPING; - Thread currentThread = Thread.currentThread(); - ClassLoader hostClassLoader = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(this.getClassLoader()); - impl.stop(this); - state = Coprocessor.State.STOPPED; - } catch (IOException ioe) { - LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe); - } finally { - currentThread.setContextClassLoader(hostClassLoader); - } - } else { - LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+ - " because not active (state="+state.toString()+")"); - } - synchronized (openTables) { - // clean up any table references - for (Table table: openTables) { - try { - 
((HTableWrapper)table).internalClose(); - } catch (IOException e) { - // nothing can be done here - LOG.warn("Failed to close " + - table.getName(), e); - } - } - } - } - - @Override - public Coprocessor getInstance() { - return impl; - } - - @Override - public ClassLoader getClassLoader() { - return classLoader; - } - - @Override - public int getPriority() { - return priority; - } - - @Override - public int getLoadSequence() { - return seq; - } - - /** @return the coprocessor environment version */ - @Override - public int getVersion() { - return Coprocessor.VERSION; - } - - /** @return the HBase release */ - @Override - public String getHBaseVersion() { - return VersionInfo.getVersion(); - } - - @Override - public Configuration getConfiguration() { - return conf; - } - - /** - * Open a table from within the Coprocessor environment - * @param tableName the table name - * @return an interface for manipulating the table - * @exception java.io.IOException Exception - */ - @Override - public Table getTable(TableName tableName) throws IOException { - return this.getTable(tableName, null); - } - - /** - * Open a table from within the Coprocessor environment - * @param tableName the table name - * @return an interface for manipulating the table - * @exception java.io.IOException Exception - */ - @Override - public Table getTable(TableName tableName, ExecutorService pool) throws IOException { - return HTableWrapper.createWrapper(openTables, tableName, this, pool); - } - } - - protected void abortServer(final CoprocessorEnvironment environment, final Throwable e) { + protected void abortServer(final E environment, final Throwable e) { abortServer(environment.getInstance().getClass().getName(), e); } @@ -586,8 +425,7 @@ public abstract class CoprocessorHost { // etc) mention this nuance of our exception handling so that coprocessor can throw appropriate // exceptions depending on situation. If any changes are made to this logic, make sure to // update all classes' comments. 
- protected void handleCoprocessorThrowable(final CoprocessorEnvironment env, final Throwable e) - throws IOException { + protected void handleCoprocessorThrowable(final E env, final Throwable e) throws IOException { if (e instanceof IOException) { throw (IOException)e; } @@ -610,7 +448,7 @@ public abstract class CoprocessorHost { "environment",e); } - coprocessors.remove(env); + coprocEnvironments.remove(env); try { shutdown(env); } catch (Exception x) { @@ -695,4 +533,192 @@ public abstract class CoprocessorHost { "'. Details of the problem: " + message); } } + + /** + * Implementations defined function to get an observer of type {@code O} from a coprocessor of + * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each + * observer they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for + * each of RegionObserver, EndpointObserver and BulkLoadObserver. + * These getters are used by {@code ObserverOperation} to get appropriate observer from the + * coprocessor. + */ + @FunctionalInterface + public interface ObserverGetter extends Function> {} + + private abstract class ObserverOperation extends ObserverContext { + ObserverGetter observerGetter; + + ObserverOperation(ObserverGetter observerGetter) { + this(observerGetter, RpcServer.getRequestUser()); + } + + ObserverOperation(ObserverGetter observerGetter, User user) { + super(user); + this.observerGetter = observerGetter; + } + + abstract void callObserver() throws IOException; + protected void postEnvCall() {} + } + + // Can't derive ObserverOperation from ObserverOperationWithResult (R = Void) because then all + // ObserverCaller implementations will have to have a return statement. 
+ // O = observer, E = environment, C = coprocessor, R=result type + public abstract class ObserverOperationWithoutResult extends ObserverOperation { + protected abstract void call(O observer) throws IOException; + + public ObserverOperationWithoutResult(ObserverGetter observerGetter) { + super(observerGetter); + } + + public ObserverOperationWithoutResult(ObserverGetter observerGetter, User user) { + super(observerGetter, user); + } + + /** + * In case of coprocessors which have many kinds of observers (for eg, {@link RegionCoprocessor} + * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all + * observers, in which case they will return null for that observer's getter. + * We simply ignore such cases. + */ + @Override + void callObserver() throws IOException { + Optional observer = observerGetter.apply(getEnvironment().getInstance()); + if (observer.isPresent()) { + call(observer.get()); + } + } + } + + public abstract class ObserverOperationWithResult extends ObserverOperation { + protected abstract R call(O observer) throws IOException; + + private R result; + + public ObserverOperationWithResult(ObserverGetter observerGetter) { + super(observerGetter); + } + + public ObserverOperationWithResult(ObserverGetter observerGetter, User user) { + super(observerGetter, user); + } + + void setResult(final R result) { + this.result = result; + } + + protected R getResult() { + return this.result; + } + + void callObserver() throws IOException { + Optional observer = observerGetter.apply(getEnvironment().getInstance()); + if (observer.isPresent()) { + result = call(observer.get()); + } + } + } + + ////////////////////////////////////////////////////////////////////////////////////////// + // Functions to execute observer hooks and handle results (if any) + ////////////////////////////////////////////////////////////////////////////////////////// + protected R execOperationWithResult(final R defaultValue, + final ObserverOperationWithResult 
observerOperation) throws IOException { + if (observerOperation == null) { + return defaultValue; + } + observerOperation.setResult(defaultValue); + execOperation(observerOperation); + return observerOperation.getResult(); + } + + // what does bypass mean? + protected R execOperationWithResult(final boolean ifBypass, final R defaultValue, + final ObserverOperationWithResult observerOperation) throws IOException { + if (observerOperation == null) { + return ifBypass ? null : defaultValue; + } else { + observerOperation.setResult(defaultValue); + boolean bypass = execOperation(true, observerOperation); + R result = observerOperation.getResult(); + return bypass == ifBypass ? result : null; + } + } + + protected boolean execOperation(final ObserverOperation observerOperation) + throws IOException { + return execOperation(true, observerOperation); + } + + protected boolean execOperation(final boolean earlyExit, + final ObserverOperation observerOperation) throws IOException { + if (observerOperation == null) return false; + boolean bypass = false; + List envs = coprocEnvironments.get(); + for (E env : envs) { + observerOperation.prepare(env); + Thread currentThread = Thread.currentThread(); + ClassLoader cl = currentThread.getContextClassLoader(); + try { + currentThread.setContextClassLoader(env.getClassLoader()); + observerOperation.callObserver(); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } finally { + currentThread.setContextClassLoader(cl); + } + bypass |= observerOperation.shouldBypass(); + if (earlyExit && observerOperation.shouldComplete()) { + break; + } + observerOperation.postEnvCall(); + } + return bypass; + } + + + /** + * Coprocessor classes can be configured in any order, based on that priority is set and + * chained in a sorted order. Should be used preStop*() hooks i.e. when master/regionserver is + * going down. 
This function first calls coprocessor methods (using ObserverOperation.call()) + and then shuts down the environment in postEnvCall().
+ * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors + * may remain shutdown if any exception occurs during next coprocessor execution which prevent + * master/regionserver stop or cluster shutdown. (Refer: + * HBASE-16663 + * @return true if bypaas coprocessor execution, false if not. + * @throws IOException + */ + protected boolean execShutdown(final ObserverOperation observerOperation) + throws IOException { + if (observerOperation == null) return false; + boolean bypass = false; + List envs = coprocEnvironments.get(); + // Iterate the coprocessors and execute ObserverOperation's call() + for (E env : envs) { + observerOperation.prepare(env); + Thread currentThread = Thread.currentThread(); + ClassLoader cl = currentThread.getContextClassLoader(); + try { + currentThread.setContextClassLoader(env.getClassLoader()); + observerOperation.callObserver(); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } finally { + currentThread.setContextClassLoader(cl); + } + bypass |= observerOperation.shouldBypass(); + if (observerOperation.shouldComplete()) { + break; + } + } + + // Iterate the coprocessors and execute ObserverOperation's postEnvCall() + for (E env : envs) { + observerOperation.prepare(env); + observerOperation.postEnvCall(); + } + return bypass; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java index efee64c63c2..f6102290dec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorService.java @@ -26,7 +26,9 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; /** * Coprocessor endpoints providing protobuf services should implement this * interface and return the {@link Service} instance via {@link #getService()}. 
+ * @deprecated Since 2.0. Will be removed in 3.0 */ +@Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface CoprocessorService { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java new file mode 100644 index 00000000000..c677d638487 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorServiceBackwardCompatiblity.java @@ -0,0 +1,86 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import com.google.protobuf.Service; +import org.apache.yetus.audience.InterfaceAudience; + +import java.util.Optional; + +/** + * Classes to help maintain backward compatibility with now deprecated {@link CoprocessorService} + * and {@link SingletonCoprocessorService}. 
+ * From 2.0 onwards, implementors of coprocessor service should also implement the relevant + * coprocessor class (For eg {@link MasterCoprocessor} for coprocessor service in master), and + * override get*Service() method to return the {@link com.google.protobuf.Service} object. + * To maintain backward compatibility with 1.0 implementation, we'll wrap implementation of + * CoprocessorService/SingletonCoprocessorService in the new + * {Master, Region, RegionServer}Coprocessor class. + * Since there is no backward compatibility guarantee for Observers, we leave get*Observer() to + * default which returns null. + * This approach to maintain backward compatibility seems cleaner and more explicit. + */ +@InterfaceAudience.Private +@Deprecated +public class CoprocessorServiceBackwardCompatiblity { + + static public class MasterCoprocessorService implements MasterCoprocessor { + + CoprocessorService service; + + public MasterCoprocessorService(CoprocessorService service) { + this.service = service; + } + + @Override + public Optional getService() { + return Optional.of(service.getService()); + } + } + + static public class RegionCoprocessorService implements RegionCoprocessor { + + CoprocessorService service; + + public RegionCoprocessorService(CoprocessorService service) { + this.service = service; + } + + @Override + public Optional getService() { + return Optional.of(service.getService()); + } + } + + static public class RegionServerCoprocessorService implements RegionServerCoprocessor { + + SingletonCoprocessorService service; + + public RegionServerCoprocessorService(SingletonCoprocessorService service) { + this.service = service; + } + + @Override + public Optional getService() { + return Optional.of(service.getService()); + } + } +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java index fc0e666ac77..096247cf9fb 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java @@ -50,7 +50,7 @@ import com.google.protobuf.Service; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface EndpointObserver extends Coprocessor { +public interface EndpointObserver { /** * Called before an Endpoint service method is invoked. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java new file mode 100644 index 00000000000..d940385ffae --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessor.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import java.util.Optional; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface MasterCoprocessor extends Coprocessor { + default Optional getMasterObserver() { + return Optional.empty(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index adab32f0405..1668b69f2db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment { +public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment { /** @return reference to the HMaster services */ MasterServices getMasterServices(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 87b9679471a..bfa88e6c94a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -77,7 +77,7 @@ import org.apache.yetus.audience.InterfaceStability; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface MasterObserver extends 
Coprocessor { +public interface MasterObserver { /** * Called before a new table is created by * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index a4dcae0da27..c4fb440795e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.SortedSet; import java.util.TreeSet; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -77,8 +77,7 @@ import com.google.protobuf.Service; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public class MultiRowMutationEndpoint extends MultiRowMutationService implements -CoprocessorService, Coprocessor { +public class MultiRowMutationEndpoint extends MultiRowMutationService implements RegionCoprocessor { private RegionCoprocessorEnvironment env; @Override public void mutateRows(RpcController controller, MutateRowsRequest request, @@ -120,10 +119,9 @@ CoprocessorService, Coprocessor { done.run(response); } - @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java index ba71129ecc2..0192ea37680 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java @@ -116,13 +116,13 @@ public class ObserverContext { * @param env The coprocessor environment to set * @param context An existing ObserverContext instance to use, or null * to create a new instance - * @param The environment type for the context + * @param The environment type for the context * @return An instance of ObserverContext with the environment set */ @Deprecated // TODO: Remove this method, ObserverContext should not depend on RpcServer - public static ObserverContext createAndPrepare( - T env, ObserverContext context) { + public static ObserverContext createAndPrepare( + E env, ObserverContext< E> context) { if (context == null) { context = new ObserverContext<>(RpcServer.getRequestUser()); } @@ -140,11 +140,11 @@ public class ObserverContext { * @param context An existing ObserverContext instance to use, or null * to create a new instance * @param user The requesting caller for the execution context - * @param The environment type for the context + * @param The environment type for the context * @return An instance of ObserverContext with the environment set */ - public static ObserverContext createAndPrepare( - T env, ObserverContext context, User user) { + public static ObserverContext createAndPrepare( + E env, ObserverContext context, User user) { if (context == null) { context = new ObserverContext<>(user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java new file mode 100644 index 00000000000..16c6d399040 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.java @@ 
-0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import java.util.Optional; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface RegionCoprocessor extends Coprocessor { + + default Optional getRegionObserver() { + return Optional.empty(); + } + + default Optional getEndpointObserver() { + return Optional.empty(); + } + + default Optional getBulkLoadObserver() { + return Optional.empty(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index dceb3d4d03a..b29cd287832 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -32,7 +32,7 @@ import 
org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment { +public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment { /** @return the region associated with this coprocessor */ Region getRegion(); @@ -61,6 +61,4 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment { // so we do not want to allow coprocessors to export metrics at the region level. We can allow // getMetricRegistryForTable() to allow coprocessors to track metrics per-table, per-regionserver. MetricRegistry getMetricRegistryForRegionServer(); - - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 60e5f40ba37..75c1da9fcd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -99,7 +99,7 @@ import org.apache.yetus.audience.InterfaceStability; // TODO as method signatures need to break, update to // ObserverContext // so we can use additional environment state that isn't exposed to coprocessors. 
-public interface RegionObserver extends Coprocessor { +public interface RegionObserver { /** Mutation type for postMutationBeforeWAL hook */ enum MutationType { APPEND, INCREMENT diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java new file mode 100644 index 00000000000..66d8113a87a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessor.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import java.util.Optional; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface RegionServerCoprocessor extends Coprocessor { + default Optional getRegionServerObserver() { + return Optional.empty(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java index da3189fbf48..ecd0f3e15f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerCoprocessorEnvironment.java @@ -27,7 +27,8 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface RegionServerCoprocessorEnvironment extends CoprocessorEnvironment { +public interface RegionServerCoprocessorEnvironment + extends CoprocessorEnvironment { /** * Gets the region server services. 
* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index 5d68eec54b7..c1af3fb3787 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceStability; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface RegionServerObserver extends Coprocessor { +public interface RegionServerObserver { /** * Called before stopping region server. * @param ctx the environment to interact with the framework and region server. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java index c7131ff79bb..719acf76eef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/SingletonCoprocessorService.java @@ -26,7 +26,9 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; /** * Coprocessor endpoints registered once per server and providing protobuf services should implement * this interface and return the {@link Service} instance via {@link #getService()}. + * @deprecated Since 2.0. 
Will be removed in 3.0 */ +@Deprecated @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface SingletonCoprocessorService { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java new file mode 100644 index 00000000000..d87c06d6ced --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessor.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import java.util.Optional; + +/** + * WALCoprocessor don't support loading services using {@link #getService()}. 
+ */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface WALCoprocessor extends Coprocessor { + Optional getWALObserver(); + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java index 8ea399d5a85..71c72a2e7f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.wal.WAL; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface WALCoprocessorEnvironment extends CoprocessorEnvironment { +public interface WALCoprocessorEnvironment extends CoprocessorEnvironment { /** @return reference to the region server's WAL */ WAL getWAL(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java index 52c27f7bc99..2190abf6067 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java @@ -66,7 +66,7 @@ import org.apache.yetus.audience.InterfaceStability; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface WALObserver extends Coprocessor { +public interface WALObserver { /** * Called before a {@link WALEdit} * is writen to WAL. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java index 8a677eef868..a6b5c4bc200 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java @@ -181,9 +181,8 @@ To implement an Endpoint, you need to: protocol buffer guide for more details on defining services.
  • Generate the Service and Message code using the protoc compiler
  • -
  • Implement the generated Service interface in your coprocessor class and implement the - CoprocessorService interface. The CoprocessorService.getService() - method should return a reference to the Endpoint's protocol buffer Service instance. +
  • Implement the generated Service interface and override get*Service() method in + relevant Coprocessor to return a reference to the Endpoint's protocol buffer Service instance.

    For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 30d801585d5..56cf496297a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -23,11 +23,9 @@ import java.io.IOException; import java.util.List; import java.util.Set; -import org.apache.commons.lang3.ClassUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.MetaMutationAnnotation; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; @@ -38,13 +36,15 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.BaseEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.locking.LockProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import 
org.apache.hadoop.hbase.metrics.MetricRegistry; @@ -65,7 +65,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Private public class MasterCoprocessorHost - extends CoprocessorHost { + extends CoprocessorHost { private static final Log LOG = LogFactory.getLog(MasterCoprocessorHost.class); @@ -73,21 +73,20 @@ public class MasterCoprocessorHost * Coprocessor environment extension providing access to master related * services. */ - static class MasterEnvironment extends CoprocessorHost.Environment + private static class MasterEnvironment extends BaseEnvironment implements MasterCoprocessorEnvironment { private final MasterServices masterServices; private final boolean supportGroupCPs; private final MetricRegistry metricRegistry; - public MasterEnvironment(final Class implClass, final Coprocessor impl, - final int priority, final int seq, final Configuration conf, - final MasterServices services) { + public MasterEnvironment(final MasterCoprocessor impl, final int priority, final int seq, + final Configuration conf, final MasterServices services) { super(impl, priority, seq, conf); this.masterServices = services; supportGroupCPs = !useLegacyMethod(impl.getClass(), "preBalanceRSGroup", ObserverContext.class, String.class); this.metricRegistry = - MetricsCoprocessor.createRegistryForMasterCoprocessor(implClass.getName()); + MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName()); } @Override @@ -101,7 +100,7 @@ public class MasterCoprocessorHost } @Override - protected void shutdown() { + public void shutdown() { super.shutdown(); MetricsCoprocessor.removeRegistry(this.metricRegistry); } @@ -122,120 +121,142 @@ public class MasterCoprocessorHost loadSystemCoprocessors(conf, MASTER_COPROCESSOR_CONF_KEY); } + + @Override - public MasterEnvironment createEnvironment(final Class implClass, - final Coprocessor instance, final int priority, final int seq, - final Configuration conf) { - for (Object itf : 
ClassUtils.getAllInterfaces(implClass)) { - Class c = (Class) itf; - if (CoprocessorService.class.isAssignableFrom(c)) { - masterServices.registerService(((CoprocessorService)instance).getService()); - } - } - return new MasterEnvironment(implClass, instance, priority, seq, conf, - masterServices); + public MasterEnvironment createEnvironment(final MasterCoprocessor instance, final int priority, + final int seq, final Configuration conf) { + instance.getService().ifPresent(masterServices::registerService); + return new MasterEnvironment(instance, priority, seq, conf, masterServices); } + @Override + public MasterCoprocessor checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException { + if (MasterCoprocessor.class.isAssignableFrom(implClass)) { + return (MasterCoprocessor)implClass.newInstance(); + } else if (CoprocessorService.class.isAssignableFrom(implClass)) { + // For backward compatibility with old CoprocessorService impl which don't extend + // MasterCoprocessor. + return new CoprocessorServiceBackwardCompatiblity.MasterCoprocessorService( + (CoprocessorService)implClass.newInstance()); + } else { + LOG.error(implClass.getName() + " is not of type MasterCoprocessor. 
Check the " + + "configuration " + CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + return null; + } + } + + private ObserverGetter masterObserverGetter = + MasterCoprocessor::getMasterObserver; + + abstract class MasterObserverOperation extends + ObserverOperationWithoutResult { + public MasterObserverOperation(){ + super(masterObserverGetter); + } + + public MasterObserverOperation(User user) { + super(masterObserverGetter, user); + } + } + + + ////////////////////////////////////////////////////////////////////////////////////////////////// + // MasterObserver operations + ////////////////////////////////////////////////////////////////////////////////////////////////// + + public boolean preCreateNamespace(final NamespaceDescriptor ns) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preCreateNamespace(ctx, ns); + public void call(MasterObserver observer) throws IOException { + observer.preCreateNamespace(this, ns); } }); } public void postCreateNamespace(final NamespaceDescriptor ns) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCreateNamespace(ctx, ns); + public void call(MasterObserver observer) throws IOException { + observer.postCreateNamespace(this, ns); } }); } public boolean preDeleteNamespace(final String namespaceName) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDeleteNamespace(ctx, namespaceName); + public void call(MasterObserver observer) throws IOException { + observer.preDeleteNamespace(this, namespaceName); } }); } public void postDeleteNamespace(final String namespaceName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postDeleteNamespace(ctx, namespaceName); + public void call(MasterObserver observer) throws IOException { + observer.postDeleteNamespace(this, namespaceName); } }); } public boolean preModifyNamespace(final NamespaceDescriptor ns) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preModifyNamespace(ctx, ns); + public void call(MasterObserver observer) throws IOException { + observer.preModifyNamespace(this, ns); } }); } public void postModifyNamespace(final NamespaceDescriptor ns) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postModifyNamespace(ctx, ns); + public void call(MasterObserver observer) throws IOException { + observer.postModifyNamespace(this, ns); } }); } public void preGetNamespaceDescriptor(final String namespaceName) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetNamespaceDescriptor(ctx, namespaceName); + public void call(MasterObserver observer) throws IOException { + observer.preGetNamespaceDescriptor(this, namespaceName); } }); } public void postGetNamespaceDescriptor(final NamespaceDescriptor ns) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetNamespaceDescriptor(ctx, ns); + public void call(MasterObserver observer) throws IOException { + observer.postGetNamespaceDescriptor(this, ns); } }); } public boolean preListNamespaceDescriptors(final List descriptors) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preListNamespaceDescriptors(ctx, descriptors); + public void call(MasterObserver observer) throws IOException { + observer.preListNamespaceDescriptors(this, descriptors); } }); } public void postListNamespaceDescriptors(final List descriptors) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postListNamespaceDescriptors(ctx, descriptors); + public void call(MasterObserver observer) throws IOException { + observer.postListNamespaceDescriptors(this, descriptors); } }); } @@ -244,195 +265,175 @@ public class MasterCoprocessorHost public void preCreateTable(final TableDescriptor htd, final RegionInfo[] regions) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preCreateTable(ctx, htd, regions); + public void call(MasterObserver observer) throws IOException { + observer.preCreateTable(this, htd, regions); } }); } public void postCreateTable(final TableDescriptor htd, final RegionInfo[] regions) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCreateTable(ctx, htd, regions); + public void call(MasterObserver observer) throws IOException { + observer.postCreateTable(this, htd, regions); } }); } public void preCreateTableAction(final TableDescriptor htd, final RegionInfo[] regions, - final User user) - throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + final User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preCreateTableAction(ctx, htd, regions); + public void call(MasterObserver observer) throws IOException { + observer.preCreateTableAction(this, htd, regions); } }); } public void postCompletedCreateTableAction( final TableDescriptor htd, final RegionInfo[] regions, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedCreateTableAction(ctx, htd, regions); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedCreateTableAction(this, htd, regions); } }); } public void preDeleteTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDeleteTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preDeleteTable(this, tableName); } }); } public void postDeleteTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postDeleteTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postDeleteTable(this, tableName); } }); } public void preDeleteTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDeleteTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preDeleteTableAction(this, tableName); } }); } public void postCompletedDeleteTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedDeleteTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedDeleteTableAction(this, tableName); } }); } public void preTruncateTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preTruncateTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preTruncateTable(this, tableName); } }); } public void postTruncateTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postTruncateTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postTruncateTable(this, tableName); } }); } - public void preTruncateTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + public void preTruncateTableAction(final TableName tableName, final User user) + throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preTruncateTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preTruncateTableAction(this, tableName); } }); } public void postCompletedTruncateTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedTruncateTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedTruncateTableAction(this, tableName); } }); } public void preModifyTable(final TableName tableName, final TableDescriptor htd) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preModifyTable(ctx, tableName, htd); + public void call(MasterObserver observer) throws IOException { + observer.preModifyTable(this, tableName, htd); } }); } public void postModifyTable(final TableName tableName, final TableDescriptor htd) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postModifyTable(ctx, tableName, htd); + public void call(MasterObserver observer) throws IOException { + observer.postModifyTable(this, tableName, htd); } }); } public void preModifyTableAction(final TableName tableName, final TableDescriptor htd, - final User user) - throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + final User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preModifyTableAction(ctx, tableName, htd); + public void call(MasterObserver observer) throws IOException { + observer.preModifyTableAction(this, tableName, htd); } }); } public void postCompletedModifyTableAction(final TableName tableName, final TableDescriptor htd, - final User user) - throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + final User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedModifyTableAction(ctx, tableName, htd); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedModifyTableAction(this, tableName, htd); } }); } public boolean preAddColumn(final TableName tableName, final ColumnFamilyDescriptor columnFamily) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preAddColumnFamily(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.preAddColumnFamily(this, tableName, columnFamily); } }); } public void postAddColumn(final TableName tableName, final ColumnFamilyDescriptor columnFamily) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postAddColumnFamily(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.postAddColumnFamily(this, tableName, columnFamily); } }); } @@ -442,11 +443,10 @@ public class MasterCoprocessorHost final ColumnFamilyDescriptor columnFamily, final User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preAddColumnFamilyAction(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.preAddColumnFamilyAction(this, tableName, columnFamily); } }); } @@ -456,33 +456,30 @@ public class MasterCoprocessorHost final ColumnFamilyDescriptor columnFamily, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedAddColumnFamilyAction(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedAddColumnFamilyAction(this, tableName, columnFamily); } }); } - public boolean preModifyColumn(final TableName tableName, final ColumnFamilyDescriptor columnFamily) - throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + public boolean preModifyColumn(final TableName tableName, + final ColumnFamilyDescriptor columnFamily) throws IOException { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preModifyColumnFamily(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.preModifyColumnFamily(this, tableName, columnFamily); } }); } public void postModifyColumn(final TableName tableName, final ColumnFamilyDescriptor columnFamily) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postModifyColumnFamily(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.postModifyColumnFamily(this, tableName, columnFamily); } }); } @@ -491,11 +488,10 @@ public class MasterCoprocessorHost final TableName tableName, final ColumnFamilyDescriptor columnFamily, final User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preModifyColumnFamilyAction(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.preModifyColumnFamilyAction(this, tableName, columnFamily); } }); } @@ -504,33 +500,30 @@ public class MasterCoprocessorHost final TableName tableName, final ColumnFamilyDescriptor columnFamily, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedModifyColumnFamilyAction(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedModifyColumnFamilyAction(this, tableName, columnFamily); } }); } public boolean preDeleteColumn(final TableName tableName, final byte[] columnFamily) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDeleteColumnFamily(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.preDeleteColumnFamily(this, tableName, columnFamily); } }); } public void postDeleteColumn(final TableName tableName, final byte[] columnFamily) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postDeleteColumnFamily(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.postDeleteColumnFamily(this, tableName, columnFamily); } }); } @@ -540,104 +533,94 @@ public class MasterCoprocessorHost final byte[] columnFamily, final User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDeleteColumnFamilyAction(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.preDeleteColumnFamilyAction(this, tableName, columnFamily); } }); } public void postCompletedDeleteColumnFamilyAction( final TableName tableName, final byte[] columnFamily, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedDeleteColumnFamilyAction(ctx, tableName, columnFamily); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedDeleteColumnFamilyAction(this, tableName, columnFamily); } }); } public void preEnableTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preEnableTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preEnableTable(this, tableName); } }); } public void postEnableTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postEnableTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postEnableTable(this, tableName); } }); } public void preEnableTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preEnableTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preEnableTableAction(this, tableName); } }); } public void postCompletedEnableTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedEnableTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedEnableTableAction(this, tableName); } }); } public void preDisableTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDisableTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preDisableTable(this, tableName); } }); } public void postDisableTable(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postDisableTable(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postDisableTable(this, tableName); } }); } public void preDisableTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDisableTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preDisableTableAction(this, tableName); } }); } public void postCompletedDisableTableAction(final TableName tableName, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedDisableTableAction(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedDisableTableAction(this, tableName); } }); } @@ -645,208 +628,188 @@ public class MasterCoprocessorHost public boolean preAbortProcedure( final ProcedureExecutor procEnv, final long procId) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preAbortProcedure(ctx, procEnv, procId); + public void call(MasterObserver observer) throws IOException { + observer.preAbortProcedure(this, procEnv, procId); } }); } public void postAbortProcedure() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postAbortProcedure(ctx); + public void call(MasterObserver observer) throws IOException { + observer.postAbortProcedure(this); } }); } public boolean preGetProcedures() throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetProcedures(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preGetProcedures(this); } }); } public void postGetProcedures(final List> procInfoList) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetProcedures(ctx, procInfoList); + public void call(MasterObserver observer) throws IOException { + observer.postGetProcedures(this, procInfoList); } }); } public boolean preGetLocks() throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetLocks(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preGetLocks(this); } }); } public void postGetLocks(final List lockedResources) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetLocks(ctx, lockedResources); + public void call(MasterObserver observer) throws IOException { + observer.postGetLocks(this, lockedResources); } }); } public boolean preMove(final RegionInfo region, final ServerName srcServer, - final ServerName destServer) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + final ServerName destServer) throws IOException { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preMove(ctx, region, srcServer, destServer); + public void call(MasterObserver observer) throws IOException { + observer.preMove(this, region, srcServer, destServer); } }); } public void postMove(final RegionInfo region, final ServerName srcServer, final ServerName destServer) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postMove(ctx, region, srcServer, destServer); + public void call(MasterObserver observer) throws IOException { + observer.postMove(this, region, srcServer, destServer); } }); } public boolean preAssign(final RegionInfo regionInfo) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preAssign(ctx, regionInfo); + public void call(MasterObserver observer) throws IOException { + observer.preAssign(this, regionInfo); } }); } public void postAssign(final RegionInfo regionInfo) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postAssign(ctx, regionInfo); + public void call(MasterObserver observer) throws IOException { + observer.postAssign(this, regionInfo); } }); } public boolean preUnassign(final RegionInfo regionInfo, final boolean force) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preUnassign(ctx, regionInfo, force); + public void call(MasterObserver observer) throws IOException { + observer.preUnassign(this, regionInfo, force); } }); } public void postUnassign(final RegionInfo regionInfo, final boolean force) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postUnassign(ctx, regionInfo, force); + public void call(MasterObserver observer) throws IOException { + observer.postUnassign(this, regionInfo, force); } }); } public void preRegionOffline(final RegionInfo regionInfo) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preRegionOffline(ctx, regionInfo); + public void call(MasterObserver observer) throws IOException { + observer.preRegionOffline(this, regionInfo); } }); } public void postRegionOffline(final RegionInfo regionInfo) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postRegionOffline(ctx, regionInfo); + public void call(MasterObserver observer) throws IOException { + observer.postRegionOffline(this, regionInfo); } }); } public void preMergeRegions(final RegionInfo[] regionsToMerge) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preMergeRegions(ctx, regionsToMerge); + public void call(MasterObserver observer) throws IOException { + observer.preMergeRegions(this, regionsToMerge); } }); } public void postMergeRegions(final RegionInfo[] regionsToMerge) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postMergeRegions(ctx, regionsToMerge); + public void call(MasterObserver observer) throws IOException { + observer.postMergeRegions(this, regionsToMerge); } }); } public boolean preBalance() throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preBalance(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preBalance(this); } }); } public void postBalance(final List plans) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postBalance(ctx, plans); + public void call(MasterObserver observer) throws IOException { + observer.postBalance(this, plans); } }); } public boolean preSetSplitOrMergeEnabled(final boolean newValue, final MasterSwitchType switchType) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSetSplitOrMergeEnabled(ctx, newValue, switchType); + public void call(MasterObserver observer) throws IOException { + observer.preSetSplitOrMergeEnabled(this, newValue, switchType); } }); } public void postSetSplitOrMergeEnabled(final boolean newValue, final MasterSwitchType switchType) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSetSplitOrMergeEnabled(ctx, newValue, switchType); + public void call(MasterObserver observer) throws IOException { + observer.postSetSplitOrMergeEnabled(this, newValue, switchType); } }); } @@ -860,11 +823,10 @@ public class MasterCoprocessorHost public void preSplitRegion( final TableName tableName, final byte[] splitRow) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSplitRegion(ctx, tableName, splitRow); + public void call(MasterObserver observer) throws IOException { + observer.preSplitRegion(this, tableName, splitRow); } }); } @@ -880,11 +842,10 @@ public class MasterCoprocessorHost final TableName tableName, final byte[] splitRow, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSplitRegionAction(ctx, tableName, splitRow); + public void call(MasterObserver observer) throws IOException { + observer.preSplitRegionAction(this, tableName, splitRow); } }); } @@ -900,11 +861,10 @@ public class MasterCoprocessorHost final RegionInfo regionInfoA, final RegionInfo regionInfoB, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompletedSplitRegionAction(ctx, regionInfoA, regionInfoB); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedSplitRegionAction(this, regionInfoA, regionInfoB); } }); } @@ -920,11 +880,10 @@ public class MasterCoprocessorHost final byte[] splitKey, final List metaEntries, final User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSplitRegionBeforePONRAction(ctx, splitKey, metaEntries); + public void call(MasterObserver observer) throws IOException { + observer.preSplitRegionBeforePONRAction(this, splitKey, metaEntries); } }); } @@ -935,11 +894,10 @@ public class MasterCoprocessorHost * @throws IOException */ public void preSplitAfterPONRAction(final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSplitRegionAfterPONRAction(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preSplitRegionAfterPONRAction(this); } }); } @@ -950,11 +908,10 @@ public class MasterCoprocessorHost * @throws IOException */ public void postRollBackSplitRegionAction(final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postRollBackSplitRegionAction(ctx); + public void call(MasterObserver observer) throws IOException { + observer.postRollBackSplitRegionAction(this); } }); } @@ -967,11 +924,10 @@ public class MasterCoprocessorHost */ public boolean preMergeRegionsAction( final RegionInfo[] regionsToMerge, final User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - oserver.preMergeRegionsAction(ctx, regionsToMerge); + public void call(MasterObserver observer) throws IOException { + observer.preMergeRegionsAction(this, regionsToMerge); } }); } @@ -987,11 +943,10 @@ public class MasterCoprocessorHost final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - oserver.postCompletedMergeRegionsAction(ctx, regionsToMerge, mergedRegion); + public void call(MasterObserver observer) throws IOException { + observer.postCompletedMergeRegionsAction(this, regionsToMerge, mergedRegion); } }); } @@ -1007,11 +962,10 @@ public class MasterCoprocessorHost final RegionInfo[] regionsToMerge, final @MetaMutationAnnotation List metaEntries, final User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - oserver.preMergeRegionsCommitAction(ctx, regionsToMerge, metaEntries); + public void call(MasterObserver observer) throws IOException { + observer.preMergeRegionsCommitAction(this, regionsToMerge, metaEntries); } }); } @@ -1027,11 +981,10 @@ public class MasterCoprocessorHost final RegionInfo[] regionsToMerge, final RegionInfo mergedRegion, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - oserver.postMergeRegionsCommitAction(ctx, regionsToMerge, mergedRegion); + public void call(MasterObserver observer) throws IOException { + observer.postMergeRegionsCommitAction(this, regionsToMerge, mergedRegion); } }); } @@ -1044,33 +997,30 @@ public class MasterCoprocessorHost */ public void postRollBackMergeRegionsAction( final RegionInfo[] regionsToMerge, final User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - oserver.postRollBackMergeRegionsAction(ctx, regionsToMerge); + public void call(MasterObserver observer) throws IOException { + observer.postRollBackMergeRegionsAction(this, regionsToMerge); } }); } public boolean preBalanceSwitch(final boolean b) throws IOException { - return execOperationWithResult(b, coprocessors.isEmpty() ? null : - new CoprocessorOperationWithResult() { - @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preBalanceSwitch(ctx, getResult())); - } - }); + return execOperationWithResult(b, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(masterObserverGetter) { + @Override + public Boolean call(MasterObserver observer) throws IOException { + return observer.preBalanceSwitch(this, getResult()); + } + }); } public void postBalanceSwitch(final boolean oldValue, final boolean newValue) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postBalanceSwitch(ctx, oldValue, newValue); + public void call(MasterObserver observer) throws IOException { + observer.postBalanceSwitch(this, oldValue, newValue); } }); } @@ -1078,16 +1028,15 @@ public class MasterCoprocessorHost public void preShutdown() throws IOException { // While stopping the cluster all coprocessors method should be executed first then the // coprocessor should be cleaned up. - execShutdown(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execShutdown(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preShutdown(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preShutdown(this); } @Override - public void postEnvCall(MasterEnvironment env) { + public void postEnvCall() { // invoke coprocessor stop method - shutdown(env); + shutdown(this.getEnvironment()); } }); } @@ -1095,228 +1044,207 @@ public class MasterCoprocessorHost public void preStopMaster() throws IOException { // While stopping master all coprocessors method should be executed first then the coprocessor // environment should be cleaned up. - execShutdown(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execShutdown(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preStopMaster(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preStopMaster(this); } @Override - public void postEnvCall(MasterEnvironment env) { + public void postEnvCall() { // invoke coprocessor stop method - shutdown(env); + shutdown(this.getEnvironment()); } }); } public void preMasterInitialization() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preMasterInitialization(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preMasterInitialization(this); } }); } public void postStartMaster() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postStartMaster(ctx); + public void call(MasterObserver observer) throws IOException { + observer.postStartMaster(this); } }); } public void preSnapshot(final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSnapshot(ctx, snapshot, hTableDescriptor); + public void call(MasterObserver observer) throws IOException { + observer.preSnapshot(this, snapshot, hTableDescriptor); } }); } public void postSnapshot(final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSnapshot(ctx, snapshot, hTableDescriptor); + public void call(MasterObserver observer) throws IOException { + observer.postSnapshot(this, snapshot, hTableDescriptor); } }); } public void preListSnapshot(final SnapshotDescription snapshot) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preListSnapshot(ctx, snapshot); + public void call(MasterObserver observer) throws IOException { + observer.preListSnapshot(this, snapshot); } }); } public void postListSnapshot(final SnapshotDescription snapshot) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postListSnapshot(ctx, snapshot); + public void call(MasterObserver observer) throws IOException { + observer.postListSnapshot(this, snapshot); } }); } public void preCloneSnapshot(final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preCloneSnapshot(ctx, snapshot, hTableDescriptor); + public void call(MasterObserver observer) throws IOException { + observer.preCloneSnapshot(this, snapshot, hTableDescriptor); } }); } public void postCloneSnapshot(final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCloneSnapshot(ctx, snapshot, hTableDescriptor); + public void call(MasterObserver observer) throws IOException { + observer.postCloneSnapshot(this, snapshot, hTableDescriptor); } }); } public void preRestoreSnapshot(final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preRestoreSnapshot(ctx, snapshot, hTableDescriptor); + public void call(MasterObserver observer) throws IOException { + observer.preRestoreSnapshot(this, snapshot, hTableDescriptor); } }); } public void postRestoreSnapshot(final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postRestoreSnapshot(ctx, snapshot, hTableDescriptor); + public void call(MasterObserver observer) throws IOException { + observer.postRestoreSnapshot(this, snapshot, hTableDescriptor); } }); } public void preDeleteSnapshot(final SnapshotDescription snapshot) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDeleteSnapshot(ctx, snapshot); + public void call(MasterObserver observer) throws IOException { + observer.preDeleteSnapshot(this, snapshot); } }); } public void postDeleteSnapshot(final SnapshotDescription snapshot) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postDeleteSnapshot(ctx, snapshot); + public void call(MasterObserver observer) throws IOException { + observer.postDeleteSnapshot(this, snapshot); } }); } public boolean preGetTableDescriptors(final List tableNamesList, final List descriptors, final String regex) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetTableDescriptors(ctx, tableNamesList, descriptors, regex); + public void call(MasterObserver observer) throws IOException { + observer.preGetTableDescriptors(this, tableNamesList, descriptors, regex); } }); } public void postGetTableDescriptors(final List tableNamesList, final List descriptors, final String regex) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetTableDescriptors(ctx, tableNamesList, descriptors, regex); + public void call(MasterObserver observer) throws IOException { + observer.postGetTableDescriptors(this, tableNamesList, descriptors, regex); } }); } public boolean preGetTableNames(final List descriptors, final String regex) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetTableNames(ctx, descriptors, regex); + public void call(MasterObserver observer) throws IOException { + observer.preGetTableNames(this, descriptors, regex); } }); } public void postGetTableNames(final List descriptors, final String regex) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetTableNames(ctx, descriptors, regex); + public void call(MasterObserver observer) throws IOException { + observer.postGetTableNames(this, descriptors, regex); } }); } public void preTableFlush(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preTableFlush(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.preTableFlush(this, tableName); } }); } public void postTableFlush(final TableName tableName) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postTableFlush(ctx, tableName); + public void call(MasterObserver observer) throws IOException { + observer.postTableFlush(this, tableName); } }); } public void preSetUserQuota( final String user, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSetUserQuota(ctx, user, quotas); + public void call(MasterObserver observer) throws IOException { + observer.preSetUserQuota(this, user, quotas); } }); } public void postSetUserQuota( final String user, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSetUserQuota(ctx, user, quotas); + public void call(MasterObserver observer) throws IOException { + observer.postSetUserQuota(this, user, quotas); } }); } @@ -1324,11 +1252,10 @@ public class MasterCoprocessorHost public void preSetUserQuota( final String user, final TableName table, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSetUserQuota(ctx, user, table, quotas); + public void call(MasterObserver observer) throws IOException { + observer.preSetUserQuota(this, user, table, quotas); } }); } @@ -1336,11 +1263,10 @@ public class MasterCoprocessorHost public void postSetUserQuota( final String user, final TableName table, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSetUserQuota(ctx, user, table, quotas); + public void call(MasterObserver observer) throws IOException { + observer.postSetUserQuota(this, user, table, quotas); } }); } @@ -1348,11 +1274,10 @@ public class MasterCoprocessorHost public void preSetUserQuota( final String user, final String namespace, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSetUserQuota(ctx, user, namespace, quotas); + public void call(MasterObserver observer) throws IOException { + observer.preSetUserQuota(this, user, namespace, quotas); } }); } @@ -1360,186 +1285,73 @@ public class MasterCoprocessorHost public void postSetUserQuota( final String user, final String namespace, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSetUserQuota(ctx, user, namespace, quotas); + public void call(MasterObserver observer) throws IOException { + observer.postSetUserQuota(this, user, namespace, quotas); } }); } public void preSetTableQuota( final TableName table, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSetTableQuota(ctx, table, quotas); + public void call(MasterObserver observer) throws IOException { + observer.preSetTableQuota(this, table, quotas); } }); } public void postSetTableQuota( final TableName table, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSetTableQuota(ctx, table, quotas); + public void call(MasterObserver observer) throws IOException { + observer.postSetTableQuota(this, table, quotas); } }); } public void preSetNamespaceQuota( final String namespace, final GlobalQuotaSettings quotas) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preSetNamespaceQuota(ctx, namespace, quotas); + public void call(MasterObserver observer) throws IOException { + observer.preSetNamespaceQuota(this, namespace, quotas); } }); } public void postSetNamespaceQuota( final String namespace, final GlobalQuotaSettings quotas) throws IOException{ - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postSetNamespaceQuota(ctx, namespace, quotas); + public void call(MasterObserver observer) throws IOException { + observer.postSetNamespaceQuota(this, namespace, quotas); } }); } - private static abstract class CoprocessorOperation - extends ObserverContext { - public CoprocessorOperation() { - this(RpcServer.getRequestUser()); - } - - public CoprocessorOperation(User user) { - super(user); - } - - public abstract void call(MasterObserver oserver, - ObserverContext ctx) throws IOException; - - public void postEnvCall(MasterEnvironment env) { - } - } - - private static abstract class CoprocessorOperationWithResult extends CoprocessorOperation { - private T result = null; - public void setResult(final T result) { this.result = result; } - public T getResult() { return this.result; } - } - - private T execOperationWithResult(final T defaultValue, - final CoprocessorOperationWithResult ctx) throws IOException { - if (ctx == null) return defaultValue; - ctx.setResult(defaultValue); - execOperation(ctx); - return ctx.getResult(); - } - - private boolean execOperation(final CoprocessorOperation ctx) throws IOException { - if (ctx == null) return false; - boolean bypass = false; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - MasterEnvironment env = envs.get(i); - if (env.getInstance() instanceof MasterObserver) { - ctx.prepare(env); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - ctx.call((MasterObserver)env.getInstance(), ctx); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (ctx.shouldComplete()) { - break; - } - } - ctx.postEnvCall(env); - } - return 
bypass; - } - - /** - * Master coprocessor classes can be configured in any order, based on that priority is set and - * chained in a sorted order. For preStopMaster()/preShutdown(), coprocessor methods are invoked - * in call() and environment is shutdown in postEnvCall().
    - * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors - * may remain shutdown if any exception occurs during next coprocessor execution which prevent - * Master stop or cluster shutdown. (Refer: - * HBASE-16663 - * @param ctx CoprocessorOperation - * @return true if bypaas coprocessor execution, false if not. - * @throws IOException - */ - private boolean execShutdown(final CoprocessorOperation ctx) throws IOException { - if (ctx == null) return false; - boolean bypass = false; - List envs = coprocessors.get(); - int envsSize = envs.size(); - // Iterate the coprocessors and execute CoprocessorOperation's call() - for (int i = 0; i < envsSize; i++) { - MasterEnvironment env = envs.get(i); - if (env.getInstance() instanceof MasterObserver) { - ctx.prepare(env); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - ctx.call((MasterObserver) env.getInstance(), ctx); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (ctx.shouldComplete()) { - break; - } - } - } - - // Iterate the coprocessors and execute CoprocessorOperation's postEnvCall() - for (int i = 0; i < envsSize; i++) { - MasterEnvironment env = envs.get(i); - ctx.postEnvCall(env); - } - return bypass; - } - - public void preMoveServersAndTables(final Set

    servers, final Set tables, final String targetGroup) - throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + public void preMoveServersAndTables(final Set
    servers, final Set tables, + final String targetGroup) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.preMoveServersAndTables(ctx, servers, tables, targetGroup); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.preMoveServersAndTables(this, servers, tables, targetGroup); } } }); } - public void postMoveServersAndTables(final Set
    servers, final Set tables, final String targetGroup) - throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + public void postMoveServersAndTables(final Set
    servers, final Set tables, + final String targetGroup) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.postMoveServersAndTables(ctx, servers, tables, targetGroup); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.postMoveServersAndTables(this, servers, tables, targetGroup); } } }); @@ -1547,12 +1359,11 @@ public class MasterCoprocessorHost public void preMoveServers(final Set
    servers, final String targetGroup) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.preMoveServers(ctx, servers, targetGroup); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.preMoveServers(this, servers, targetGroup); } } }); @@ -1560,12 +1371,11 @@ public class MasterCoprocessorHost public void postMoveServers(final Set
    servers, final String targetGroup) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.postMoveServers(ctx, servers, targetGroup); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.postMoveServers(this, servers, targetGroup); } } }); @@ -1573,12 +1383,11 @@ public class MasterCoprocessorHost public void preMoveTables(final Set tables, final String targetGroup) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.preMoveTables(ctx, tables, targetGroup); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.preMoveTables(this, tables, targetGroup); } } }); @@ -1586,12 +1395,11 @@ public class MasterCoprocessorHost public void postMoveTables(final Set tables, final String targetGroup) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.postMoveTables(ctx, tables, targetGroup); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.postMoveTables(this, tables, targetGroup); } } }); @@ -1599,12 +1407,11 @@ public class MasterCoprocessorHost public void preAddRSGroup(final String name) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.preAddRSGroup(ctx, name); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.preAddRSGroup(this, name); } } }); @@ -1612,12 +1419,11 @@ public class MasterCoprocessorHost public void postAddRSGroup(final String name) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) { - oserver.postAddRSGroup(ctx, name); + public void call(MasterObserver observer) throws IOException { + if (((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.postAddRSGroup(this, name); } } }); @@ -1625,12 +1431,11 @@ public class MasterCoprocessorHost public void preRemoveRSGroup(final String name) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.preRemoveRSGroup(ctx, name); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.preRemoveRSGroup(this, name); } } }); @@ -1638,12 +1443,11 @@ public class MasterCoprocessorHost public void postRemoveRSGroup(final String name) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.postRemoveRSGroup(ctx, name); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.postRemoveRSGroup(this, name); } } }); @@ -1651,12 +1455,11 @@ public class MasterCoprocessorHost public void preBalanceRSGroup(final String name) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.preBalanceRSGroup(ctx, name); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.preBalanceRSGroup(this, name); } } }); @@ -1664,12 +1467,11 @@ public class MasterCoprocessorHost public void postBalanceRSGroup(final String name, final boolean balanceRan) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, - ObserverContext ctx) throws IOException { - if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { - oserver.postBalanceRSGroup(ctx, name, balanceRan); + public void call(MasterObserver observer) throws IOException { + if(((MasterEnvironment)getEnvironment()).supportGroupCPs) { + observer.postBalanceRSGroup(this, name, balanceRan); } } }); @@ -1677,226 +1479,204 @@ public class MasterCoprocessorHost public void preAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preAddReplicationPeer(ctx, peerId, peerConfig); + public void call(MasterObserver observer) throws IOException { + observer.preAddReplicationPeer(this, peerId, peerConfig); } }); } public void postAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postAddReplicationPeer(ctx, peerId, peerConfig); + public void call(MasterObserver observer) throws IOException { + observer.postAddReplicationPeer(this, peerId, peerConfig); } }); } public void preRemoveReplicationPeer(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preRemoveReplicationPeer(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.preRemoveReplicationPeer(this, peerId); } }); } public void postRemoveReplicationPeer(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postRemoveReplicationPeer(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.postRemoveReplicationPeer(this, peerId); } }); } public void preEnableReplicationPeer(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preEnableReplicationPeer(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.preEnableReplicationPeer(this, peerId); } }); } public void postEnableReplicationPeer(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postEnableReplicationPeer(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.postEnableReplicationPeer(this, peerId); } }); } public void preDisableReplicationPeer(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preDisableReplicationPeer(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.preDisableReplicationPeer(this, peerId); } }); } public void postDisableReplicationPeer(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postDisableReplicationPeer(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.postDisableReplicationPeer(this, peerId); } }); } public void preGetReplicationPeerConfig(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preGetReplicationPeerConfig(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.preGetReplicationPeerConfig(this, peerId); } }); } public void postGetReplicationPeerConfig(final String peerId) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postGetReplicationPeerConfig(ctx, peerId); + public void call(MasterObserver observer) throws IOException { + observer.postGetReplicationPeerConfig(this, peerId); } }); } public void preUpdateReplicationPeerConfig(final String peerId, final ReplicationPeerConfig peerConfig) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preUpdateReplicationPeerConfig(ctx, peerId, peerConfig); + public void call(MasterObserver observer) throws IOException { + observer.preUpdateReplicationPeerConfig(this, peerId, peerConfig); } }); } public void postUpdateReplicationPeerConfig(final String peerId, final ReplicationPeerConfig peerConfig) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postUpdateReplicationPeerConfig(ctx, peerId, peerConfig); + public void call(MasterObserver observer) throws IOException { + observer.postUpdateReplicationPeerConfig(this, peerId, peerConfig); } }); } public void preListReplicationPeers(final String regex) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.preListReplicationPeers(ctx, regex); + public void call(MasterObserver observer) throws IOException { + observer.preListReplicationPeers(this, regex); } }); } public void postListReplicationPeers(final String regex) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver observer, ObserverContext ctx) - throws IOException { - observer.postListReplicationPeers(ctx, regex); + public void call(MasterObserver observer) throws IOException { + observer.postListReplicationPeers(this, regex); } }); } public void preRequestLock(String namespace, TableName tableName, RegionInfo[] regionInfos, LockType type, String description) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preRequestLock(ctx, namespace, tableName, regionInfos, type, description); + public void call(MasterObserver observer) throws IOException { + observer.preRequestLock(this, namespace, tableName, regionInfos, type, description); } }); } public void postRequestLock(String namespace, TableName tableName, RegionInfo[] regionInfos, LockType type, String description) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postRequestLock(ctx, namespace, tableName, regionInfos, type, description); + public void call(MasterObserver observer) throws IOException { + observer.postRequestLock(this, namespace, tableName, regionInfos, type, description); } }); } public void preLockHeartbeat(LockProcedure proc, boolean keepAlive) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preLockHeartbeat(ctx, proc, keepAlive); + public void call(MasterObserver observer) throws IOException { + observer.preLockHeartbeat(this, proc, keepAlive); } }); } public void postLockHeartbeat(LockProcedure proc, boolean keepAlive) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postLockHeartbeat(ctx, proc, keepAlive); + public void call(MasterObserver observer) throws IOException { + observer.postLockHeartbeat(this, proc, keepAlive); } }); } public void preListDeadServers() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preListDeadServers(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preListDeadServers(this); } }); } public void postListDeadServers() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postListDeadServers(ctx); + public void call(MasterObserver observer) throws IOException { + observer.postListDeadServers(this); } }); } public void preClearDeadServers() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preClearDeadServers(ctx); + public void call(MasterObserver observer) throws IOException { + observer.preClearDeadServers(this); } }); } public void postClearDeadServers() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { @Override - public void call(MasterObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postClearDeadServers(ctx); + public void call(MasterObserver observer) throws IOException { + observer.postClearDeadServers(this); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java index c59e5e78c94..d6dbcd4bfcd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -24,6 +25,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -35,7 +37,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; * are deleted. 
*/ @InterfaceAudience.Private -public class MasterSpaceQuotaObserver implements MasterObserver { +public class MasterSpaceQuotaObserver implements MasterCoprocessor, MasterObserver { public static final String REMOVE_QUOTA_ON_TABLE_DELETE = "hbase.quota.remove.on.table.delete"; public static final boolean REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT = true; @@ -43,6 +45,11 @@ public class MasterSpaceQuotaObserver implements MasterObserver { private Configuration conf; private boolean quotasEnabled = false; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void start(CoprocessorEnvironment ctx) throws IOException { this.cpEnv = ctx; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 19491b4bbb1..84e9aa53671 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -33,7 +33,6 @@ import com.google.protobuf.Message; import com.google.protobuf.Service; import org.apache.commons.collections4.map.AbstractReferenceMap; import org.apache.commons.collections4.map.ReferenceMap; -import org.apache.commons.lang3.ClassUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -56,11 +55,15 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.BaseEnvironment; +import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import 
org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity; import org.apache.hadoop.hbase.coprocessor.EndpointObserver; import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType; @@ -68,7 +71,6 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; @@ -91,7 +93,7 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public class RegionCoprocessorHost - extends CoprocessorHost { + extends CoprocessorHost { private static final Log LOG = LogFactory.getLog(RegionCoprocessorHost.class); // The shared data map @@ -103,10 +105,10 @@ public class RegionCoprocessorHost private final boolean hasCustomPostScannerFilterRow; /** - * + * * Encapsulation of the environment of each coprocessor */ - static class RegionEnvironment extends CoprocessorHost.Environment + static class RegionEnvironment extends BaseEnvironment implements RegionCoprocessorEnvironment { private Region region; @@ -119,7 +121,7 @@ public class RegionCoprocessorHost * @param impl the coprocessor instance * @param priority chaining priority */ - public RegionEnvironment(final Coprocessor impl, final int priority, + public RegionEnvironment(final RegionCoprocessor impl, 
final int priority, final int seq, final Configuration conf, final Region region, final RegionServerServices services, final ConcurrentMap sharedData) { super(impl, priority, seq, conf); @@ -142,6 +144,7 @@ public class RegionCoprocessorHost return rsServices; } + @Override public void shutdown() { super.shutdown(); MetricsCoprocessor.removeRegistry(this.metricRegistry); @@ -226,7 +229,7 @@ public class RegionCoprocessorHost // now check whether any coprocessor implements postScannerFilterRow boolean hasCustomPostScannerFilterRow = false; - out: for (RegionEnvironment env: coprocessors) { + out: for (RegionCoprocessorEnvironment env: coprocEnvironments) { if (env.getInstance() instanceof RegionObserver) { Class clazz = env.getInstance().getClass(); for(;;) { @@ -361,13 +364,16 @@ public class RegionCoprocessorHost // scan the table attributes for coprocessor load specifications // initialize the coprocessors - List configured = new ArrayList<>(); + List configured = new ArrayList<>(); for (TableCoprocessorAttribute attr: getTableCoprocessorAttrsFromSchema(conf, region.getTableDescriptor())) { // Load encompasses classloading and coprocessor initialization try { - RegionEnvironment env = load(attr.getPath(), attr.getClassName(), attr.getPriority(), - attr.getConf()); + RegionCoprocessorEnvironment env = load(attr.getPath(), attr.getClassName(), + attr.getPriority(), attr.getConf()); + if (env == null) { + continue; + } configured.add(env); LOG.info("Loaded coprocessor " + attr.getClassName() + " from HTD of " + region.getTableDescriptor().getTableName().getNameAsString() + " successfully."); @@ -381,60 +387,101 @@ public class RegionCoprocessorHost } } // add together to coprocessor set for COW efficiency - coprocessors.addAll(configured); + coprocEnvironments.addAll(configured); } @Override - public RegionEnvironment createEnvironment(Class implClass, - Coprocessor instance, int priority, int seq, Configuration conf) { - // Check if it's an Endpoint. 
- // Due to current dynamic protocol design, Endpoint - // uses a different way to be registered and executed. - // It uses a visitor pattern to invoke registered Endpoint - // method. - for (Object itf : ClassUtils.getAllInterfaces(implClass)) { - Class c = (Class) itf; - if (CoprocessorService.class.isAssignableFrom(c)) { - region.registerService( ((CoprocessorService)instance).getService() ); - } - } + public RegionEnvironment createEnvironment(RegionCoprocessor instance, int priority, int seq, + Configuration conf) { + // Due to current dynamic protocol design, Endpoint uses a different way to be registered and + // executed. It uses a visitor pattern to invoke registered Endpoint method. + instance.getService().ifPresent(region::registerService); ConcurrentMap classData; // make sure only one thread can add maps synchronized (SHARED_DATA_MAP) { // as long as at least one RegionEnvironment holds on to its classData it will // remain in this map classData = - SHARED_DATA_MAP.computeIfAbsent(implClass.getName(), k -> new ConcurrentHashMap<>()); + SHARED_DATA_MAP.computeIfAbsent(instance.getClass().getName(), + k -> new ConcurrentHashMap<>()); } return new RegionEnvironment(instance, priority, seq, conf, region, rsServices, classData); } + @Override + public RegionCoprocessor checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException { + if (RegionCoprocessor.class.isAssignableFrom(implClass)) { + return (RegionCoprocessor)implClass.newInstance(); + } else if (CoprocessorService.class.isAssignableFrom(implClass)) { + // For backward compatibility with old CoprocessorService impl which don't extend + // RegionCoprocessor. + return new CoprocessorServiceBackwardCompatiblity.RegionCoprocessorService( + (CoprocessorService)implClass.newInstance()); + } else { + LOG.error(implClass.getName() + " is not of type RegionCoprocessor. 
Check the " + + "configuration " + CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); + return null; + } + } + + private ObserverGetter regionObserverGetter = + RegionCoprocessor::getRegionObserver; + + private ObserverGetter endpointObserverGetter = + RegionCoprocessor::getEndpointObserver; + + abstract class RegionObserverOperation extends ObserverOperationWithoutResult { + public RegionObserverOperation() { + super(regionObserverGetter); + } + + public RegionObserverOperation(User user) { + super(regionObserverGetter, user); + } + } + + abstract class BulkLoadObserverOperation extends + ObserverOperationWithoutResult { + public BulkLoadObserverOperation(User user) { + super(RegionCoprocessor::getBulkLoadObserver, user); + } + } + + + ////////////////////////////////////////////////////////////////////////////////////////////////// + // Observer operations + ////////////////////////////////////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////////////////////////////////////// + // Observer operations + ////////////////////////////////////////////////////////////////////////////////////////////////// + /** * Invoked before a region open. * * @throws IOException Signals that an I/O exception has occurred. */ public void preOpen() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preOpen(ctx); + public void call(RegionObserver observer) throws IOException { + observer.preOpen(this); } }); } + /** * Invoked after a region open */ public void postOpen() { try { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postOpen(ctx); + public void call(RegionObserver observer) throws IOException { + observer.postOpen(this); } }); } catch (IOException e) { @@ -447,11 +494,10 @@ public class RegionCoprocessorHost */ public void postLogReplay() { try { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postLogReplay(ctx); + public void call(RegionObserver observer) throws IOException { + observer.postLogReplay(this); } }); } catch (IOException e) { @@ -464,11 +510,10 @@ public class RegionCoprocessorHost * @param abortRequested true if the server is aborting */ public void preClose(final boolean abortRequested) throws IOException { - execOperation(false, new RegionOperation() { + execOperation(false, new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preClose(ctx, abortRequested); + public void call(RegionObserver observer) throws IOException { + observer.preClose(this, abortRequested); } }); } @@ -479,14 +524,15 @@ public class RegionCoprocessorHost */ public void postClose(final boolean abortRequested) { try { - execOperation(false, new RegionOperation() { + execOperation(false, new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postClose(ctx, abortRequested); + public void call(RegionObserver observer) throws IOException { + observer.postClose(this, abortRequested); } - public void postEnvCall(RegionEnvironment env) { - shutdown(env); + + @Override + public void postEnvCall() { + shutdown(this.getEnvironment()); } }); } catch (IOException e) { @@ -499,18 
+545,19 @@ public class RegionCoprocessorHost * {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, * InternalScanner, CompactionLifeCycleTracker, long)} */ - public InternalScanner preCompactScannerOpen(HStore store, List scanners, - ScanType scanType, long earliestPutTs, CompactionLifeCycleTracker tracker, User user, - long readPoint) throws IOException { - return execOperationWithResult(null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult(user) { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preCompactScannerOpen(ctx, store, scanners, scanType, - earliestPutTs, getResult(), tracker, readPoint)); - } - }); + public InternalScanner preCompactScannerOpen(final HStore store, + final List scanners, final ScanType scanType, final long earliestPutTs, + final CompactionLifeCycleTracker tracker, final User user, final long readPoint) + throws IOException { + return execOperationWithResult(null, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult( + regionObserverGetter, user) { + @Override + public InternalScanner call(RegionObserver observer) throws IOException { + return observer.preCompactScannerOpen(this, store, scanners, scanType, + earliestPutTs, getResult(), tracker, readPoint); + } + }); } /** @@ -522,13 +569,12 @@ public class RegionCoprocessorHost * @return If {@code true}, skip the normal selection process and use the current list * @throws IOException */ - public boolean preCompactSelection(HStore store, List candidates, - CompactionLifeCycleTracker tracker, User user) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { + public boolean preCompactSelection(final HStore store, final List candidates, + final CompactionLifeCycleTracker tracker, final User user) throws IOException { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation(user) { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preCompactSelection(ctx, store, candidates, tracker); + public void call(RegionObserver observer) throws IOException { + observer.preCompactSelection(this, store, candidates, tracker); } }); } @@ -540,13 +586,12 @@ public class RegionCoprocessorHost * @param selected The store files selected to compact * @param tracker used to track the life cycle of a compaction */ - public void postCompactSelection(HStore store, ImmutableList selected, - CompactionLifeCycleTracker tracker, User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { + public void postCompactSelection(final HStore store, final ImmutableList selected, + final CompactionLifeCycleTracker tracker, final User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompactSelection(ctx, store, selected, tracker); + public void call(RegionObserver observer) throws IOException { + observer.postCompactSelection(this, store, selected, tracker); } }); } @@ -559,16 +604,17 @@ public class RegionCoprocessorHost * @param tracker used to track the life cycle of a compaction * @throws IOException */ - public InternalScanner preCompact(HStore store, InternalScanner scanner, ScanType scanType, - CompactionLifeCycleTracker tracker, User user) throws IOException { - return execOperationWithResult(false, scanner, - coprocessors.isEmpty() ? 
null : new RegionOperationWithResult(user) { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preCompact(ctx, store, getResult(), scanType, tracker)); - } - }); + public InternalScanner preCompact(final HStore store, final InternalScanner scanner, + final ScanType scanType, final CompactionLifeCycleTracker tracker, final User user) + throws IOException { + return execOperationWithResult(false, scanner, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult( + regionObserverGetter, user) { + @Override + public InternalScanner call(RegionObserver observer) throws IOException { + return observer.preCompact(this, store, getResult(), scanType, tracker); + } + }); } /** @@ -578,13 +624,12 @@ public class RegionCoprocessorHost * @param tracker used to track the life cycle of a compaction * @throws IOException */ - public void postCompact(HStore store, HStoreFile resultFile, CompactionLifeCycleTracker tracker, - User user) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) { + public void postCompact(final HStore store, final HStoreFile resultFile, + final CompactionLifeCycleTracker tracker, final User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCompact(ctx, store, resultFile, tracker); + public void call(RegionObserver observer) throws IOException { + observer.postCompact(this, store, resultFile, tracker); } }); } @@ -595,14 +640,13 @@ public class RegionCoprocessorHost */ public InternalScanner preFlush(HStore store, final InternalScanner scanner) throws IOException { - return execOperationWithResult(false, scanner, - coprocessors.isEmpty() ? 
null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preFlush(ctx, store, getResult())); - } - }); + return execOperationWithResult(false, scanner, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public InternalScanner call(RegionObserver observer) throws IOException { + return observer.preFlush(this, store, getResult()); + } + }); } /** @@ -610,11 +654,10 @@ public class RegionCoprocessorHost * @throws IOException */ public void preFlush() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preFlush(ctx); + public void call(RegionObserver observer) throws IOException { + observer.preFlush(this); } }); } @@ -623,16 +666,15 @@ public class RegionCoprocessorHost * See * {@link RegionObserver#preFlushScannerOpen(ObserverContext, Store, List, InternalScanner, long)} */ - public InternalScanner preFlushScannerOpen(HStore store, List scanners, - long readPoint) throws IOException { - return execOperationWithResult(null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preFlushScannerOpen(ctx, store, scanners, getResult(), readPoint)); - } - }); + public InternalScanner preFlushScannerOpen(final HStore store, + final List scanners, final long readPoint) throws IOException { + return execOperationWithResult(null, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public InternalScanner call(RegionObserver observer) throws IOException { + return observer.preFlushScannerOpen(this, store, scanners, getResult(), readPoint); + } + }); } /** @@ -640,11 +682,10 @@ public class RegionCoprocessorHost * @throws IOException */ public void postFlush() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postFlush(ctx); + public void call(RegionObserver observer) throws IOException { + observer.postFlush(this); } }); } @@ -653,12 +694,11 @@ public class RegionCoprocessorHost * Invoked after a memstore flush * @throws IOException */ - public void postFlush(HStore store, HStoreFile storeFile) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + public void postFlush(final HStore store, final HStoreFile storeFile) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postFlush(ctx, store, storeFile); + public void call(RegionObserver observer) throws IOException { + observer.postFlush(this, store, storeFile); } }); } @@ -671,11 +711,10 @@ public class RegionCoprocessorHost */ public boolean preGet(final Get get, final List results) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preGetOp(ctx, get, results); + public void call(RegionObserver observer) throws IOException { + observer.preGetOp(this, get, results); } }); } @@ -687,11 +726,10 @@ public class RegionCoprocessorHost */ public void postGet(final Get get, final List results) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postGetOp(ctx, get, results); + public void call(RegionObserver observer) throws IOException { + observer.postGetOp(this, get, results); } }); } @@ -703,14 +741,13 @@ public class RegionCoprocessorHost * @exception IOException Exception */ public Boolean preExists(final Get get) throws IOException { - return execOperationWithResult(true, false, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preExists(ctx, get, getResult())); - } - }); + return execOperationWithResult(true, false, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.preExists(this, get, getResult()); + } + }); } /** @@ -721,14 +758,13 @@ public class RegionCoprocessorHost */ public boolean postExists(final Get get, boolean exists) throws IOException { - return execOperationWithResult(exists, - coprocessors.isEmpty() ? 
null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postExists(ctx, get, getResult())); - } - }); + return execOperationWithResult(exists, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.postExists(this, get, getResult()); + } + }); } /** @@ -740,11 +776,10 @@ public class RegionCoprocessorHost */ public boolean prePut(final Put put, final WALEdit edit, final Durability durability) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.prePut(ctx, put, edit, durability); + public void call(RegionObserver observer) throws IOException { + observer.prePut(this, put, edit, durability); } }); } @@ -761,11 +796,10 @@ public class RegionCoprocessorHost */ public boolean prePrepareTimeStampForDeleteVersion(final Mutation mutation, final Cell kv, final byte[] byteNow, final Get get) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.prePrepareTimeStampForDeleteVersion(ctx, mutation, kv, byteNow, get); + public void call(RegionObserver observer) throws IOException { + observer.prePrepareTimeStampForDeleteVersion(this, mutation, kv, byteNow, get); } }); } @@ -778,11 +812,10 @@ public class RegionCoprocessorHost */ public void postPut(final Put put, final WALEdit edit, final Durability durability) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postPut(ctx, put, edit, durability); + public void call(RegionObserver observer) throws IOException { + observer.postPut(this, put, edit, durability); } }); } @@ -796,11 +829,10 @@ public class RegionCoprocessorHost */ public boolean preDelete(final Delete delete, final WALEdit edit, final Durability durability) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preDelete(ctx, delete, edit, durability); + public void call(RegionObserver observer) throws IOException { + observer.preDelete(this, delete, edit, durability); } }); } @@ -813,11 +845,10 @@ public class RegionCoprocessorHost */ public void postDelete(final Delete delete, final WALEdit edit, final Durability durability) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postDelete(ctx, delete, edit, durability); + public void call(RegionObserver observer) throws IOException { + observer.postDelete(this, delete, edit, durability); } }); } @@ -829,11 +860,10 @@ public class RegionCoprocessorHost */ public boolean preBatchMutate( final MiniBatchOperationInProgress miniBatchOp) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preBatchMutate(ctx, miniBatchOp); + public void call(RegionObserver observer) throws IOException { + observer.preBatchMutate(this, miniBatchOp); } }); } @@ -844,11 +874,10 @@ public class RegionCoprocessorHost */ public void postBatchMutate( final MiniBatchOperationInProgress miniBatchOp) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postBatchMutate(ctx, miniBatchOp); + public void call(RegionObserver observer) throws IOException { + observer.postBatchMutate(this, miniBatchOp); } }); } @@ -856,11 +885,10 @@ public class RegionCoprocessorHost public void postBatchMutateIndispensably( final MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postBatchMutateIndispensably(ctx, miniBatchOp, success); + public void call(RegionObserver observer) throws IOException { + observer.postBatchMutateIndispensably(this, miniBatchOp, success); } }); } @@ -880,15 +908,14 @@ public class RegionCoprocessorHost final byte [] qualifier, final CompareOperator op, final ByteArrayComparable comparator, final Put put) throws IOException { - return execOperationWithResult(true, false, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preCheckAndPut(ctx, row, family, qualifier, - op, comparator, put, getResult())); - } - }); + return execOperationWithResult(true, false, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.preCheckAndPut(this, row, family, qualifier, + op, comparator, put, getResult()); + } + }); } /** @@ -902,18 +929,17 @@ public class RegionCoprocessorHost * be bypassed, or null otherwise * @throws IOException e */ - public Boolean preCheckAndPutAfterRowLock(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator, - final Put put) throws IOException { - return execOperationWithResult(true, false, - coprocessors.isEmpty() ? 
null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preCheckAndPutAfterRowLock(ctx, row, family, qualifier, - op, comparator, put, getResult())); - } - }); + public Boolean preCheckAndPutAfterRowLock( + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final ByteArrayComparable comparator, final Put put) throws IOException { + return execOperationWithResult(true, false, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.preCheckAndPutAfterRowLock(this, row, family, qualifier, + op, comparator, put, getResult()); + } + }); } /** @@ -929,15 +955,14 @@ public class RegionCoprocessorHost final byte [] qualifier, final CompareOperator op, final ByteArrayComparable comparator, final Put put, boolean result) throws IOException { - return execOperationWithResult(result, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postCheckAndPut(ctx, row, family, qualifier, - op, comparator, put, getResult())); - } - }); + return execOperationWithResult(result, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.postCheckAndPut(this, row, family, qualifier, + op, comparator, put, getResult()); + } + }); } /** @@ -955,15 +980,14 @@ public class RegionCoprocessorHost final byte [] qualifier, final CompareOperator op, final ByteArrayComparable comparator, final Delete delete) throws IOException { - return execOperationWithResult(true, false, - coprocessors.isEmpty() ? 
null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preCheckAndDelete(ctx, row, family, - qualifier, op, comparator, delete, getResult())); - } - }); + return execOperationWithResult(true, false, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.preCheckAndDelete(this, row, family, + qualifier, op, comparator, delete, getResult()); + } + }); } /** @@ -978,17 +1002,16 @@ public class RegionCoprocessorHost * @throws IOException e */ public Boolean preCheckAndDeleteAfterRowLock(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator, - final Delete delete) throws IOException { - return execOperationWithResult(true, false, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preCheckAndDeleteAfterRowLock(ctx, row, - family, qualifier, op, comparator, delete, getResult())); - } - }); + final byte[] qualifier, final CompareOperator op, final ByteArrayComparable comparator, + final Delete delete) throws IOException { + return execOperationWithResult(true, false, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.preCheckAndDeleteAfterRowLock(this, row, + family, qualifier, op, comparator, delete, getResult()); + } + }); } /** @@ -1004,15 +1027,14 @@ public class RegionCoprocessorHost final byte [] qualifier, final CompareOperator op, final ByteArrayComparable comparator, final Delete delete, boolean result) throws IOException { - return execOperationWithResult(result, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postCheckAndDelete(ctx, row, family, - qualifier, op, comparator, delete, getResult())); - } - }); + return execOperationWithResult(result, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.postCheckAndDelete(this, row, family, + qualifier, op, comparator, delete, getResult()); + } + }); } /** @@ -1022,14 +1044,13 @@ public class RegionCoprocessorHost * @throws IOException if an error occurred on the coprocessor */ public Result preAppend(final Append append) throws IOException { - return execOperationWithResult(true, null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preAppend(ctx, append)); - } - }); + return execOperationWithResult(true, null, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Result call(RegionObserver observer) throws IOException { + return observer.preAppend(this, append); + } + }); } /** @@ -1039,14 +1060,13 @@ public class RegionCoprocessorHost * @throws IOException if an error occurred on the coprocessor */ public Result preAppendAfterRowLock(final Append append) throws IOException { - return execOperationWithResult(true, null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preAppendAfterRowLock(ctx, append)); - } - }); + return execOperationWithResult(true, null, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Result call(RegionObserver observer) throws IOException { + return observer.preAppendAfterRowLock(this, append); + } + }); } /** @@ -1056,14 +1076,13 @@ public class RegionCoprocessorHost * @throws IOException if an error occurred on the coprocessor */ public Result preIncrement(final Increment increment) throws IOException { - return execOperationWithResult(true, null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preIncrement(ctx, increment)); - } - }); + return execOperationWithResult(true, null, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Result call(RegionObserver observer) throws IOException { + return observer.preIncrement(this, increment); + } + }); } /** @@ -1073,14 +1092,13 @@ public class RegionCoprocessorHost * @throws IOException if an error occurred on the coprocessor */ public Result preIncrementAfterRowLock(final Increment increment) throws IOException { - return execOperationWithResult(true, null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preIncrementAfterRowLock(ctx, increment)); - } - }); + return execOperationWithResult(true, null, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Result call(RegionObserver observer) throws IOException { + return observer.preIncrementAfterRowLock(this, increment); + } + }); } /** @@ -1089,14 +1107,13 @@ public class RegionCoprocessorHost * @throws IOException if an error occurred on the coprocessor */ public Result postAppend(final Append append, final Result result) throws IOException { - return execOperationWithResult(result, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postAppend(ctx, append, result)); - } - }); + return execOperationWithResult(result, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Result call(RegionObserver observer) throws IOException { + return observer.postAppend(this, append, result); + } + }); } /** @@ -1105,14 +1122,13 @@ public class RegionCoprocessorHost * @throws IOException if an error occurred on the coprocessor */ public Result postIncrement(final Increment increment, Result result) throws IOException { - return execOperationWithResult(result, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postIncrement(ctx, increment, getResult())); - } - }); + return execOperationWithResult(result, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Result call(RegionObserver observer) throws IOException { + return observer.postIncrement(this, increment, getResult()); + } + }); } /** @@ -1122,30 +1138,28 @@ public class RegionCoprocessorHost * @exception IOException Exception */ public RegionScanner preScannerOpen(final Scan scan) throws IOException { - return execOperationWithResult(true, null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preScannerOpen(ctx, scan, getResult())); - } - }); + return execOperationWithResult(true, null, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public RegionScanner call(RegionObserver observer) throws IOException { + return observer.preScannerOpen(this, scan, getResult()); + } + }); } /** * See * {@link RegionObserver#preStoreScannerOpen(ObserverContext, Store, Scan, NavigableSet, KeyValueScanner, long)} */ - public KeyValueScanner preStoreScannerOpen(HStore store, Scan scan, - NavigableSet targetCols, long readPt) throws IOException { - return execOperationWithResult(null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preStoreScannerOpen(ctx, store, scan, targetCols, getResult(), readPt)); - } - }); + public KeyValueScanner preStoreScannerOpen(final HStore store, final Scan scan, + final NavigableSet targetCols, final long readPt) throws IOException { + return execOperationWithResult(null, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public KeyValueScanner call(RegionObserver observer) throws IOException { + return observer.preStoreScannerOpen(this, store, scan, targetCols, getResult(), readPt); + } + }); } /** @@ -1155,14 +1169,13 @@ public class RegionCoprocessorHost * @exception IOException Exception */ public RegionScanner postScannerOpen(final Scan scan, RegionScanner s) throws IOException { - return execOperationWithResult(s, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postScannerOpen(ctx, scan, getResult())); - } - }); + return execOperationWithResult(s, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public RegionScanner call(RegionObserver observer) throws IOException { + return observer.postScannerOpen(this, scan, getResult()); + } + }); } /** @@ -1175,14 +1188,13 @@ public class RegionCoprocessorHost */ public Boolean preScannerNext(final InternalScanner s, final List results, final int limit) throws IOException { - return execOperationWithResult(true, false, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preScannerNext(ctx, s, results, limit, getResult())); - } - }); + return execOperationWithResult(true, false, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.preScannerNext(this, s, results, limit, getResult()); + } + }); } /** @@ -1196,14 +1208,13 @@ public class RegionCoprocessorHost public boolean postScannerNext(final InternalScanner s, final List results, final int limit, boolean hasMore) throws IOException { - return execOperationWithResult(hasMore, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postScannerNext(ctx, s, results, limit, getResult())); - } - }); + return execOperationWithResult(hasMore, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.postScannerNext(this, s, results, limit, getResult()); + } + }); } /** @@ -1218,14 +1229,13 @@ public class RegionCoprocessorHost throws IOException { // short circuit for performance if (!hasCustomPostScannerFilterRow) return true; - return execOperationWithResult(true, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postScannerFilterRow(ctx, s, curRowCell, getResult())); - } - }); + return execOperationWithResult(true, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.postScannerFilterRow(this, s, curRowCell, getResult()); + } + }); } /** @@ -1234,11 +1244,10 @@ public class RegionCoprocessorHost * @exception IOException Exception */ public boolean preScannerClose(final InternalScanner s) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preScannerClose(ctx, s); + public void call(RegionObserver observer) throws IOException { + observer.preScannerClose(this, s); } }); } @@ -1247,11 +1256,10 @@ public class RegionCoprocessorHost * @exception IOException Exception */ public void postScannerClose(final InternalScanner s) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postScannerClose(ctx, s); + public void call(RegionObserver observer) throws IOException { + observer.postScannerClose(this, s); } }); } @@ -1262,11 +1270,10 @@ public class RegionCoprocessorHost * @throws IOException Exception */ public void preReplayWALs(final RegionInfo info, final Path edits) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preReplayWALs(ctx, info, edits); + public void call(RegionObserver observer) throws IOException { + observer.preReplayWALs(this, info, edits); } }); } @@ -1277,11 +1284,10 @@ public class RegionCoprocessorHost * @throws IOException Exception */ public void postReplayWALs(final RegionInfo info, final Path edits) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postReplayWALs(ctx, info, edits); + public void call(RegionObserver observer) throws IOException { + observer.postReplayWALs(this, info, edits); } }); } @@ -1295,11 +1301,10 @@ public class RegionCoprocessorHost */ public boolean preWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preWALRestore(ctx, info, logKey, logEdit); + public void call(RegionObserver observer) throws IOException { + observer.preWALRestore(this, info, logKey, logEdit); } }); } @@ -1312,11 +1317,10 @@ public class RegionCoprocessorHost */ public void postWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postWALRestore(ctx, info, logKey, logEdit); + public void call(RegionObserver observer) throws IOException { + observer.postWALRestore(this, info, logKey, logEdit); } }); } @@ -1327,31 +1331,28 @@ public class RegionCoprocessorHost * @throws IOException */ public boolean preBulkLoadHFile(final List> familyPaths) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preBulkLoadHFile(ctx, familyPaths); + public void call(RegionObserver observer) throws IOException { + observer.preBulkLoadHFile(this, familyPaths); } }); } public boolean preCommitStoreFile(final byte[] family, final List> pairs) throws IOException { - return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + return execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.preCommitStoreFile(ctx, family, pairs); + public void call(RegionObserver observer) throws IOException { + observer.preCommitStoreFile(this, family, pairs); } }); } public void postCommitStoreFile(final byte[] family, Path srcPath, Path dstPath) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCommitStoreFile(ctx, family, srcPath, dstPath); + public void call(RegionObserver observer) throws IOException { + observer.postCommitStoreFile(this, family, srcPath, dstPath); } }); } @@ -1365,32 +1366,29 @@ public class RegionCoprocessorHost */ public boolean postBulkLoadHFile(final List> familyPaths, Map> map, boolean hasLoaded) throws IOException { - return execOperationWithResult(hasLoaded, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postBulkLoadHFile(ctx, familyPaths, map, getResult())); - } - }); + return execOperationWithResult(hasLoaded, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Boolean call(RegionObserver observer) throws IOException { + return observer.postBulkLoadHFile(this, familyPaths, map, getResult()); + } + }); } public void postStartRegionOperation(final Operation op) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postStartRegionOperation(ctx, op); + public void call(RegionObserver observer) throws IOException { + observer.postStartRegionOperation(this, op); } }); } public void postCloseRegionOperation(final Operation op) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation() { @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postCloseRegionOperation(ctx, op); + public void call(RegionObserver observer) throws IOException { + observer.postCloseRegionOperation(this, op); } }); } @@ -1409,14 +1407,14 @@ public class RegionCoprocessorHost public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, final Reference r) throws IOException { - return execOperationWithResult(null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, getResult())); - } - }); + return execOperationWithResult(null, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public StoreFileReader call(RegionObserver observer) throws IOException { + return observer.preStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r, + getResult()); + } + }); } /** @@ -1433,192 +1431,77 @@ public class RegionCoprocessorHost public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, final Reference r, final StoreFileReader reader) throws IOException { - return execOperationWithResult(reader, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, getResult())); - } - }); + return execOperationWithResult(reader, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public StoreFileReader call(RegionObserver observer) throws IOException { + return observer.postStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r, + getResult()); + } + }); } public Cell postMutationBeforeWAL(final MutationType opType, final Mutation mutation, final Cell oldCell, Cell newCell) throws IOException { - return execOperationWithResult(newCell, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postMutationBeforeWAL(ctx, opType, mutation, oldCell, getResult())); - } - }); + return execOperationWithResult(newCell, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public Cell call(RegionObserver observer) throws IOException { + return observer.postMutationBeforeWAL(this, opType, mutation, oldCell, getResult()); + } + }); } public Message preEndpointInvocation(final Service service, final String methodName, Message request) throws IOException { - return execOperationWithResult(request, - coprocessors.isEmpty() ? null : new EndpointOperationWithResult() { - @Override - public void call(EndpointObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.preEndpointInvocation(ctx, service, methodName, getResult())); - } - }); + return execOperationWithResult(request, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(endpointObserverGetter) { + @Override + public Message call(EndpointObserver observer) throws IOException { + return observer.preEndpointInvocation(this, service, methodName, getResult()); + } + }); } public void postEndpointInvocation(final Service service, final String methodName, final Message request, final Message.Builder responseBuilder) throws IOException { - execOperation(coprocessors.isEmpty() ? null : new EndpointOperation() { - @Override - public void call(EndpointObserver oserver, ObserverContext ctx) - throws IOException { - oserver.postEndpointInvocation(ctx, service, methodName, request, responseBuilder); - } - }); + execOperation(coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithoutResult(endpointObserverGetter) { + @Override + public void call(EndpointObserver observer) throws IOException { + observer.postEndpointInvocation(this, service, methodName, request, responseBuilder); + } + }); } public DeleteTracker postInstantiateDeleteTracker(DeleteTracker tracker) throws IOException { - return execOperationWithResult(tracker, - coprocessors.isEmpty() ? 
null : new RegionOperationWithResult() { - @Override - public void call(RegionObserver oserver, ObserverContext ctx) - throws IOException { - setResult(oserver.postInstantiateDeleteTracker(ctx, getResult())); - } - }); + return execOperationWithResult(tracker, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult(regionObserverGetter) { + @Override + public DeleteTracker call(RegionObserver observer) throws IOException { + return observer.postInstantiateDeleteTracker(this, getResult()); + } + }); } - private static abstract class CoprocessorOperation - extends ObserverContext { - public CoprocessorOperation() { - this(RpcServer.getRequestUser()); - } - - public CoprocessorOperation(User user) { - super(user); - } - - public abstract void call(Coprocessor observer, - ObserverContext ctx) throws IOException; - public abstract boolean hasCall(Coprocessor observer); - public void postEnvCall(RegionEnvironment env) { } + ///////////////////////////////////////////////////////////////////////////////////////////////// + // BulkLoadObserver hooks + ///////////////////////////////////////////////////////////////////////////////////////////////// + public void prePrepareBulkLoad(User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? 
null : + new BulkLoadObserverOperation(user) { + @Override protected void call(BulkLoadObserver observer) throws IOException { + observer.prePrepareBulkLoad(this); + } + }); } - private static abstract class RegionOperation extends CoprocessorOperation { - public RegionOperation() { - } - - public RegionOperation(User user) { - super(user); - } - - public abstract void call(RegionObserver observer, - ObserverContext ctx) throws IOException; - - public boolean hasCall(Coprocessor observer) { - return observer instanceof RegionObserver; - } - - public void call(Coprocessor observer, ObserverContext ctx) - throws IOException { - call((RegionObserver)observer, ctx); - } - } - - private static abstract class RegionOperationWithResult extends RegionOperation { - public RegionOperationWithResult() { - } - - public RegionOperationWithResult(User user) { - super (user); - } - - private T result = null; - public void setResult(final T result) { this.result = result; } - public T getResult() { return this.result; } - } - - private static abstract class EndpointOperation extends CoprocessorOperation { - public abstract void call(EndpointObserver observer, - ObserverContext ctx) throws IOException; - - public boolean hasCall(Coprocessor observer) { - return observer instanceof EndpointObserver; - } - - public void call(Coprocessor observer, ObserverContext ctx) - throws IOException { - call((EndpointObserver)observer, ctx); - } - } - - private static abstract class EndpointOperationWithResult extends EndpointOperation { - private T result = null; - public void setResult(final T result) { this.result = result; } - public T getResult() { return this.result; } - } - - private boolean execOperation(final CoprocessorOperation ctx) - throws IOException { - return execOperation(true, ctx); - } - - private T execOperationWithResult(final T defaultValue, - final RegionOperationWithResult ctx) throws IOException { - if (ctx == null) return defaultValue; - ctx.setResult(defaultValue); - 
execOperation(true, ctx); - return ctx.getResult(); - } - - private T execOperationWithResult(final boolean ifBypass, final T defaultValue, - final RegionOperationWithResult ctx) throws IOException { - boolean bypass = false; - T result = defaultValue; - if (ctx != null) { - ctx.setResult(defaultValue); - bypass = execOperation(true, ctx); - result = ctx.getResult(); - } - return bypass == ifBypass ? result : null; - } - - private T execOperationWithResult(final T defaultValue, - final EndpointOperationWithResult ctx) throws IOException { - if (ctx == null) return defaultValue; - ctx.setResult(defaultValue); - execOperation(true, ctx); - return ctx.getResult(); - } - - private boolean execOperation(final boolean earlyExit, final CoprocessorOperation ctx) - throws IOException { - boolean bypass = false; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - RegionEnvironment env = envs.get(i); - Coprocessor observer = env.getInstance(); - if (ctx.hasCall(observer)) { - ctx.prepare(env); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - ctx.call(observer, ctx); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (earlyExit && ctx.shouldComplete()) { - break; - } - } - - ctx.postEnvCall(env); - } - return bypass; + public void preCleanupBulkLoad(User user) throws IOException { + execOperation(coprocEnvironments.isEmpty() ? 
null : + new BulkLoadObserverOperation(user) { + @Override protected void call(BulkLoadObserver observer) throws IOException { + observer.preCleanupBulkLoad(this); + } + }); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java index 5cb87b58800..3325ba30990 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java @@ -19,33 +19,29 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.util.Comparator; -import java.util.List; -import org.apache.commons.lang3.ClassUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; -import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; +import org.apache.hadoop.hbase.coprocessor.BaseEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity; import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; import org.apache.hadoop.hbase.coprocessor.SingletonCoprocessorService; -import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; 
import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public class RegionServerCoprocessorHost extends - CoprocessorHost { + CoprocessorHost { private static final Log LOG = LogFactory.getLog(RegionServerCoprocessorHost.class); @@ -70,242 +66,149 @@ public class RegionServerCoprocessorHost extends } @Override - public RegionServerEnvironment createEnvironment(Class implClass, - Coprocessor instance, int priority, int sequence, Configuration conf) { - return new RegionServerEnvironment(implClass, instance, priority, - sequence, conf, this.rsServices); + public RegionServerEnvironment createEnvironment( + RegionServerCoprocessor instance, int priority, int sequence, Configuration conf) { + return new RegionServerEnvironment(instance, priority, sequence, conf, this.rsServices); } + @Override + public RegionServerCoprocessor checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException { + if (RegionServerCoprocessor.class.isAssignableFrom(implClass)) { + return (RegionServerCoprocessor)implClass.newInstance(); + } else if (SingletonCoprocessorService.class.isAssignableFrom(implClass)) { + // For backward compatibility with old CoprocessorService impl which don't extend + // RegionCoprocessor. + return new CoprocessorServiceBackwardCompatiblity.RegionServerCoprocessorService( + (SingletonCoprocessorService)implClass.newInstance()); + } else { + LOG.error(implClass.getName() + " is not of type RegionServerCoprocessor. 
Check the " + + "configuration " + CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY); + return null; + } + } + + private ObserverGetter rsObserverGetter = + RegionServerCoprocessor::getRegionServerObserver; + + abstract class RegionServerObserverOperation extends + ObserverOperationWithoutResult { + public RegionServerObserverOperation() { + super(rsObserverGetter); + } + + public RegionServerObserverOperation(User user) { + super(rsObserverGetter, user); + } + } + + ////////////////////////////////////////////////////////////////////////////////////////////////// + // RegionServerObserver operations + ////////////////////////////////////////////////////////////////////////////////////////////////// + public void preStop(String message, User user) throws IOException { // While stopping the region server all coprocessors method should be executed first then the // coprocessor should be cleaned up. - execShutdown(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) { + execShutdown(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation(user) { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.preStopRegionServer(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.preStopRegionServer(this); } + @Override - public void postEnvCall(RegionServerEnvironment env) { + public void postEnvCall() { // invoke coprocessor stop method - shutdown(env); + shutdown(this.getEnvironment()); } }); } public void preRollWALWriterRequest() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionServerObserverOperation() { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.preRollWALWriterRequest(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.preRollWALWriterRequest(this); } }); } public void postRollWALWriterRequest() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.postRollWALWriterRequest(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.postRollWALWriterRequest(this); } }); } public void preReplicateLogEntries() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.preReplicateLogEntries(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.preReplicateLogEntries(this); } }); } public void postReplicateLogEntries() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.postReplicateLogEntries(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.postReplicateLogEntries(this); } }); } public ReplicationEndpoint postCreateReplicationEndPoint(final ReplicationEndpoint endpoint) throws IOException { - return execOperationWithResult(endpoint, coprocessors.isEmpty() ? 
null - : new CoprocessOperationWithResult() { - @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - setResult(oserver.postCreateReplicationEndPoint(ctx, getResult())); - } - }); + return execOperationWithResult(endpoint, coprocEnvironments.isEmpty() ? null : + new ObserverOperationWithResult( + rsObserverGetter) { + @Override + public ReplicationEndpoint call(RegionServerObserver observer) throws IOException { + return observer.postCreateReplicationEndPoint(this, getResult()); + } + }); } public void preClearCompactionQueues() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.preClearCompactionQueues(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.preClearCompactionQueues(this); } }); } public void postClearCompactionQueues() throws IOException { - execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + execOperation(coprocEnvironments.isEmpty() ? 
null : new RegionServerObserverOperation() { @Override - public void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException { - oserver.postClearCompactionQueues(ctx); + public void call(RegionServerObserver observer) throws IOException { + observer.postClearCompactionQueues(this); } }); } - private T execOperationWithResult(final T defaultValue, - final CoprocessOperationWithResult ctx) throws IOException { - if (ctx == null) - return defaultValue; - ctx.setResult(defaultValue); - execOperation(ctx); - return ctx.getResult(); - } - - private static abstract class CoprocessorOperation - extends ObserverContext { - public CoprocessorOperation() { - this(RpcServer.getRequestUser()); - } - - public CoprocessorOperation(User user) { - super(user); - } - - public abstract void call(RegionServerObserver oserver, - ObserverContext ctx) throws IOException; - - public void postEnvCall(RegionServerEnvironment env) { - } - } - - private static abstract class CoprocessOperationWithResult extends CoprocessorOperation { - private T result = null; - - public void setResult(final T result) { - this.result = result; - } - - public T getResult() { - return this.result; - } - } - - private boolean execOperation(final CoprocessorOperation ctx) throws IOException { - if (ctx == null) return false; - boolean bypass = false; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - RegionServerEnvironment env = envs.get(i); - if (env.getInstance() instanceof RegionServerObserver) { - ctx.prepare(env); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - ctx.call((RegionServerObserver)env.getInstance(), ctx); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (ctx.shouldComplete()) { - break; - } - } - 
ctx.postEnvCall(env); - } - return bypass; - } - - /** - * RegionServer coprocessor classes can be configured in any order, based on that priority is set - * and chained in a sorted order. For preStop(), coprocessor methods are invoked in call() and - * environment is shutdown in postEnvCall().
    - * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors - * may remain shutdown if any exception occurs during next coprocessor execution which prevent - * RegionServer stop. (Refer: - * HBASE-16663 - * @param ctx CoprocessorOperation - * @return true if bypaas coprocessor execution, false if not. - * @throws IOException - */ - private boolean execShutdown(final CoprocessorOperation ctx) throws IOException { - if (ctx == null) return false; - boolean bypass = false; - List envs = coprocessors.get(); - int envsSize = envs.size(); - // Iterate the coprocessors and execute CoprocessorOperation's call() - for (int i = 0; i < envsSize; i++) { - RegionServerEnvironment env = envs.get(i); - if (env.getInstance() instanceof RegionServerObserver) { - ctx.prepare(env); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - ctx.call((RegionServerObserver) env.getInstance(), ctx); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (ctx.shouldComplete()) { - break; - } - } - } - - // Iterate the coprocessors and execute CoprocessorOperation's postEnvCall() - for (int i = 0; i < envsSize; i++) { - RegionServerEnvironment env = envs.get(i); - ctx.postEnvCall(env); - } - return bypass; - } - /** * Coprocessor environment extension providing access to region server * related services. 
*/ - static class RegionServerEnvironment extends CoprocessorHost.Environment + private static class RegionServerEnvironment extends BaseEnvironment implements RegionServerCoprocessorEnvironment { private final RegionServerServices regionServerServices; private final MetricRegistry metricRegistry; @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BC_UNCONFIRMED_CAST", justification="Intentional; FB has trouble detecting isAssignableFrom") - public RegionServerEnvironment(final Class implClass, - final Coprocessor impl, final int priority, final int seq, - final Configuration conf, final RegionServerServices services) { + public RegionServerEnvironment(final RegionServerCoprocessor impl, final int priority, + final int seq, final Configuration conf, final RegionServerServices services) { super(impl, priority, seq, conf); this.regionServerServices = services; - for (Object itf : ClassUtils.getAllInterfaces(implClass)) { - Class c = (Class) itf; - if (SingletonCoprocessorService.class.isAssignableFrom(c)) {// FindBugs: BC_UNCONFIRMED_CAST - this.regionServerServices.registerService( - ((SingletonCoprocessorService) impl).getService()); - break; - } - } + impl.getService().ifPresent(regionServerServices::registerService); this.metricRegistry = - MetricsCoprocessor.createRegistryForRSCoprocessor(implClass.getName()); + MetricsCoprocessor.createRegistryForRSCoprocessor(impl.getClass().getName()); } @Override @@ -319,32 +222,9 @@ public class RegionServerCoprocessorHost extends } @Override - protected void shutdown() { + public void shutdown() { super.shutdown(); MetricsCoprocessor.removeRegistry(metricRegistry); } } - - /** - * Environment priority comparator. Coprocessors are chained in sorted - * order. 
- */ - static class EnvironmentPriorityComparator implements - Comparator { - @Override - public int compare(final CoprocessorEnvironment env1, - final CoprocessorEnvironment env2) { - if (env1.getPriority() < env2.getPriority()) { - return -1; - } else if (env1.getPriority() > env2.getPriority()) { - return 1; - } - if (env1.getLoadSequence() < env2.getLoadSequence()) { - return -1; - } else if (env1.getLoadSequence() > env2.getLoadSequence()) { - return 1; - } - return 0; - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java index d2b8567c52b..c7d0eadd9ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java @@ -33,7 +33,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @@ -138,38 +140,17 @@ public class SecureBulkLoadManager { public String prepareBulkLoad(final Region region, final PrepareBulkLoadRequest request) throws IOException { - List bulkLoadObservers = getBulkLoadObservers(region); + region.getCoprocessorHost().prePrepareBulkLoad(getActiveUser()); - if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) { - ObserverContext ctx = new ObserverContext<>(getActiveUser()); - 
ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost() - .findCoprocessorEnvironment(BulkLoadObserver.class).get(0)); - - for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) { - bulkLoadObserver.prePrepareBulkLoad(ctx); - } - } - - String bulkToken = - createStagingDir(baseStagingDir, getActiveUser(), region.getTableDescriptor().getTableName()) - .toString(); + String bulkToken = createStagingDir(baseStagingDir, getActiveUser(), + region.getTableDescriptor().getTableName()).toString(); return bulkToken; } public void cleanupBulkLoad(final Region region, final CleanupBulkLoadRequest request) throws IOException { - List bulkLoadObservers = getBulkLoadObservers(region); - - if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) { - ObserverContext ctx = new ObserverContext<>(getActiveUser()); - ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost() - .findCoprocessorEnvironment(BulkLoadObserver.class).get(0)); - - for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) { - bulkLoadObserver.preCleanupBulkLoad(ctx); - } - } + region.getCoprocessorHost().preCleanupBulkLoad(getActiveUser()); Path path = new Path(request.getBulkToken()); if (!fs.delete(path, true)) { @@ -275,13 +256,6 @@ public class SecureBulkLoadManager { return map; } - private List getBulkLoadObservers(Region region) { - List coprocessorList = - region.getCoprocessorHost().findCoprocessors(BulkLoadObserver.class); - - return coprocessorList; - } - private Path createStagingDir(Path baseDir, User user, TableName tableName) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java index b6d23bf8143..73ba776ea3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java @@ -21,22 +21,23 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; -import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.coprocessor.BaseEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.WALCoprocessor; import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.WALObserver; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.yetus.audience.InterfaceAudience; /** * Implements the coprocessor environment and runtime support for coprocessors @@ -44,12 +45,13 @@ import org.apache.hadoop.hbase.wal.WALKey; */ @InterfaceAudience.Private public class WALCoprocessorHost - extends CoprocessorHost { + extends CoprocessorHost { + private static final Log LOG = LogFactory.getLog(WALCoprocessorHost.class); /** * Encapsulation of the environment of each coprocessor */ - static class WALEnvironment extends CoprocessorHost.Environment + static class WALEnvironment extends BaseEnvironment implements WALCoprocessorEnvironment { private final WAL wal; @@ -63,19 +65,18 @@ public class WALCoprocessorHost /** * Constructor - * @param implClass - not used * @param impl the coprocessor instance * @param priority chaining priority * @param seq load sequence * @param conf 
configuration * @param wal WAL */ - public WALEnvironment(Class implClass, final Coprocessor impl, - final int priority, final int seq, final Configuration conf, - final WAL wal) { + private WALEnvironment(final WALCoprocessor impl, final int priority, final int seq, + final Configuration conf, final WAL wal) { super(impl, priority, seq, conf); this.wal = wal; - this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor(implClass.getName()); + this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor( + impl.getClass().getName()); } @Override @@ -84,7 +85,7 @@ public class WALCoprocessorHost } @Override - protected void shutdown() { + public void shutdown() { super.shutdown(); MetricsCoprocessor.removeRegistry(this.metricRegistry); } @@ -111,13 +112,34 @@ public class WALCoprocessorHost } @Override - public WALEnvironment createEnvironment(final Class implClass, - final Coprocessor instance, final int priority, final int seq, - final Configuration conf) { - return new WALEnvironment(implClass, instance, priority, seq, conf, - this.wal); + public WALEnvironment createEnvironment(final WALCoprocessor instance, final int priority, + final int seq, final Configuration conf) { + return new WALEnvironment(instance, priority, seq, conf, this.wal); } + @Override + public WALCoprocessor checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException { + if (WALCoprocessor.class.isAssignableFrom(implClass)) { + return (WALCoprocessor)implClass.newInstance(); + } else { + LOG.error(implClass.getName() + " is not of type WALCoprocessor. 
Check the " + + "configuration " + CoprocessorHost.WAL_COPROCESSOR_CONF_KEY); + return null; + } + } + + private ObserverGetter walObserverGetter = + WALCoprocessor::getWALObserver; + + abstract class WALObserverOperation extends + ObserverOperationWithoutResult { + public WALObserverOperation() { + super(walObserverGetter); + } + } + + /** * @param info * @param logKey @@ -127,32 +149,13 @@ public class WALCoprocessorHost */ public boolean preWALWrite(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit) throws IOException { - boolean bypass = false; - if (this.coprocessors == null || this.coprocessors.isEmpty()) return bypass; - ObserverContext ctx = null; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - WALEnvironment env = envs.get(i); - if (env.getInstance() instanceof WALObserver) { - final WALObserver observer = (WALObserver)env.getInstance(); - ctx = ObserverContext.createAndPrepare(env, ctx); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - observer.preWALWrite(ctx, info, logKey, logEdit); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - bypass |= ctx.shouldBypass(); - if (ctx.shouldComplete()) { - break; - } + return execOperationWithResult(false, coprocEnvironments.isEmpty() ? 
null : + new ObserverOperationWithResult(walObserverGetter) { + @Override + public Boolean call(WALObserver observer) throws IOException { + return observer.preWALWrite(this, info, logKey, logEdit); } - } - return bypass; + }); } /** @@ -163,29 +166,12 @@ public class WALCoprocessorHost */ public void postWALWrite(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit) throws IOException { - if (this.coprocessors == null || this.coprocessors.isEmpty()) return; - ObserverContext ctx = null; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - WALEnvironment env = envs.get(i); - if (env.getInstance() instanceof WALObserver) { - final WALObserver observer = (WALObserver)env.getInstance(); - ctx = ObserverContext.createAndPrepare(env, ctx); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - observer.postWALWrite(ctx, info, logKey, logEdit); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - if (ctx.shouldComplete()) { - break; - } + execOperation(coprocEnvironments.isEmpty() ?
null : new WALObserverOperation() { + @Override + protected void call(WALObserver observer) throws IOException { + observer.postWALWrite(this, info, logKey, logEdit); } - } + }); } /** @@ -194,29 +180,12 @@ public class WALCoprocessorHost * @param newPath the path of the wal we are going to create */ public void preWALRoll(Path oldPath, Path newPath) throws IOException { - if (this.coprocessors == null || this.coprocessors.isEmpty()) return; - ObserverContext ctx = null; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - WALEnvironment env = envs.get(i); - if (env.getInstance() instanceof WALObserver) { - final WALObserver observer = (WALObserver)env.getInstance(); - ctx = ObserverContext.createAndPrepare(env, ctx); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - observer.preWALRoll(ctx, oldPath, newPath); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - if (ctx.shouldComplete()) { - break; - } + execOperation(coprocEnvironments.isEmpty() ? 
null : new WALObserverOperation() { + @Override + protected void call(WALObserver observer) throws IOException { + observer.preWALRoll(this, oldPath, newPath); } - } + }); } /** @@ -225,28 +194,11 @@ public class WALCoprocessorHost * @param newPath the path of the wal we have created and now is the current */ public void postWALRoll(Path oldPath, Path newPath) throws IOException { - if (this.coprocessors == null || this.coprocessors.isEmpty()) return; - ObserverContext ctx = null; - List envs = coprocessors.get(); - for (int i = 0; i < envs.size(); i++) { - WALEnvironment env = envs.get(i); - if (env.getInstance() instanceof WALObserver) { - final WALObserver observer = (WALObserver)env.getInstance(); - ctx = ObserverContext.createAndPrepare(env, ctx); - Thread currentThread = Thread.currentThread(); - ClassLoader cl = currentThread.getContextClassLoader(); - try { - currentThread.setContextClassLoader(env.getClassLoader()); - observer.postWALRoll(ctx, oldPath, newPath); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } finally { - currentThread.setContextClassLoader(cl); - } - if (ctx.shouldComplete()) { - break; - } + execOperation(coprocEnvironments.isEmpty() ? 
null : new WALObserverOperation() { + @Override + protected void call(WALObserver observer) throws IOException { + observer.postWALRoll(this, oldPath, newPath); } - } + }); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java index fdb951b2d26..32ec617b54d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationObserver.java @@ -21,11 +21,13 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; @@ -40,9 +42,14 @@ import org.apache.hadoop.hbase.util.Pair; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class ReplicationObserver implements RegionObserver { +public class ReplicationObserver implements RegionCoprocessor, RegionObserver { private static final Log LOG = LogFactory.getLog(ReplicationObserver.class); + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preCommitStoreFile(final ObserverContext ctx, final byte[] family, final List> pairs) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 7081ea17fd0..d66b754c53b 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -28,6 +28,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; @@ -74,13 +75,15 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.EndpointObserver; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; import org.apache.hadoop.hbase.filter.ByteArrayComparable; @@ -169,8 +172,10 @@ import org.apache.yetus.audience.InterfaceAudience; *

    */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class AccessController implements MasterObserver, RegionObserver, RegionServerObserver, - AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { +public class AccessController implements MasterCoprocessor, RegionCoprocessor, + RegionServerCoprocessor, AccessControlService.Interface, + MasterObserver, RegionObserver, RegionServerObserver, EndpointObserver, BulkLoadObserver { + // TODO: encapsulate observer functions into separate class/sub-class. private static final Log LOG = LogFactory.getLog(AccessController.class); @@ -987,6 +992,39 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS } } + /*********************************** Observer/Service Getters ***********************************/ + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + + @Override + public Optional getEndpointObserver() { + return Optional.of(this); + } + + @Override + public Optional getBulkLoadObserver() { + return Optional.of(this); + } + + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } + + @Override + public Optional getService() { + return Optional.of(AccessControlProtos.AccessControlService.newReflectiveService(this)); + } + + /*********************************** Observer implementations ***********************************/ + @Override public void preCreateTable(ObserverContext c, TableDescriptor desc, RegionInfo[] regions) throws IOException { @@ -2448,11 +2486,6 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS done.run(response); } - @Override - public Service getService() { - return AccessControlProtos.AccessControlService.newReflectiveService(this); - } - private Region getRegion(RegionCoprocessorEnvironment e) { return e.getRegion(); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java index 0b765d7f64c..5b4acbe16b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.util.Collection; +import java.util.Optional; import java.util.regex.Matcher; import org.apache.commons.io.FilenameUtils; @@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -43,7 +45,7 @@ import org.apache.yetus.audience.InterfaceAudience; * Master observer for restricting coprocessor assignments. 
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class CoprocessorWhitelistMasterObserver implements MasterObserver { +public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, MasterObserver { public static final String CP_COPROCESSOR_WHITELIST_PATHS_KEY = "hbase.coprocessor.region.whitelist.paths"; @@ -51,6 +53,11 @@ public class CoprocessorWhitelistMasterObserver implements MasterObserver { private static final Log LOG = LogFactory .getLog(CoprocessorWhitelistMasterObserver.class); + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preModifyTable(ObserverContext ctx, TableName tableName, TableDescriptor htd) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java index 50b8765d70f..4b1f28ea91d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java @@ -19,13 +19,12 @@ package org.apache.hadoop.hbase.security.token; import java.io.IOException; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -42,6 +41,7 @@ import org.apache.hadoop.security.token.Token; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; 
+import org.apache.yetus.audience.InterfaceAudience; /** * Provides a service for obtaining authentication tokens via the @@ -49,7 +49,7 @@ import com.google.protobuf.Service; */ @InterfaceAudience.Private public class TokenProvider implements AuthenticationProtos.AuthenticationService.Interface, - Coprocessor, CoprocessorService { + RegionCoprocessor { private static final Log LOG = LogFactory.getLog(TokenProvider.class); @@ -96,8 +96,8 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService // AuthenticationService implementation @Override - public Service getService() { - return AuthenticationProtos.AuthenticationService.newReflectiveService(this); + public Optional getService() { + return Optional.of(AuthenticationProtos.AuthenticationService.newReflectiveService(this)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index b3b1bc4212d..671e98998c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -30,6 +30,7 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -64,14 +65,13 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import 
org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.filter.Filter; @@ -101,7 +101,6 @@ import org.apache.hadoop.hbase.regionserver.OperationStatus; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker; -import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; @@ -122,8 +121,9 @@ import com.google.protobuf.Service; * visibility labels */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) -public class VisibilityController implements MasterObserver, RegionObserver, - VisibilityLabelsService.Interface, CoprocessorService { +// TODO: break out Observer functions into separate class/sub-class. +public class VisibilityController implements MasterCoprocessor, RegionCoprocessor, + VisibilityLabelsService.Interface, MasterObserver, RegionObserver { private static final Log LOG = LogFactory.getLog(VisibilityController.class); private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger." 
@@ -176,10 +176,6 @@ public class VisibilityController implements MasterObserver, RegionObserver, + " accordingly."); } - if (env instanceof RegionServerCoprocessorEnvironment) { - throw new RuntimeException("Visibility controller should not be configured as " - + "'hbase.coprocessor.regionserver.classes'."); - } // Do not create for master CPs if (!(env instanceof MasterCoprocessorEnvironment)) { visibilityLabelService = VisibilityLabelServiceManager.getInstance() @@ -192,6 +188,22 @@ public class VisibilityController implements MasterObserver, RegionObserver, } + /**************************** Observer/Service Getters ************************************/ + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + + @Override + public Optional getService() { + return Optional.of(VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this)); + } + /********************************* Master related hooks **********************************/ @Override @@ -760,11 +772,6 @@ public class VisibilityController implements MasterObserver, RegionObserver, return rewriteCell; } - @Override - public Service getService() { - return VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this); - } - @Override public boolean postScannerFilterRow(final ObserverContext e, final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException { @@ -1086,35 +1093,6 @@ public class VisibilityController implements MasterObserver, RegionObserver, } } - /** - * A RegionServerObserver impl that provides the custom - * VisibilityReplicationEndpoint. This class should be configured as the - * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be - * replicated as string. The value for the configuration should be - * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'. 
- */ - public static class VisibilityReplication implements RegionServerObserver { - private Configuration conf; - private VisibilityLabelService visibilityLabelService; - - @Override - public void start(CoprocessorEnvironment env) throws IOException { - this.conf = env.getConfiguration(); - visibilityLabelService = VisibilityLabelServiceManager.getInstance() - .getVisibilityLabelService(this.conf); - } - - @Override - public void stop(CoprocessorEnvironment env) throws IOException { - } - - @Override - public ReplicationEndpoint postCreateReplicationEndPoint( - ObserverContext ctx, ReplicationEndpoint endpoint) { - return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService); - } - } - /** * @param t * @return NameValuePair of the exception name to stringified version os exception. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java new file mode 100644 index 00000000000..6887c313525 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java @@ -0,0 +1,64 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.security.visibility; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; +import org.apache.hadoop.hbase.replication.ReplicationEndpoint; + +import java.io.IOException; +import java.util.Optional; + +/** + * A RegionServerObserver impl that provides the custom + * VisibilityReplicationEndpoint. This class should be configured as the + * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be + * replicated as string. The value for the configuration should be + * 'org.apache.hadoop.hbase.security.visibility.VisibilityReplication'. 
+ */ +public class VisibilityReplication implements RegionServerCoprocessor, RegionServerObserver { + private Configuration conf; + private VisibilityLabelService visibilityLabelService; + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + this.conf = env.getConfiguration(); + visibilityLabelService = VisibilityLabelServiceManager.getInstance() + .getVisibilityLabelService(this.conf); + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + } + + @Override public Optional getRegionServerObserver() { + return Optional.of(this); + } + + @Override + public ReplicationEndpoint postCreateReplicationEndPoint( + ObserverContext ctx, ReplicationEndpoint endpoint) { + return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java index 5ec61d481a9..60fd22d8523 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java @@ -23,12 +23,14 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; import org.apache.hadoop.hbase.regionserver.OperationStatus; import java.io.IOException; +import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; /** @@ -58,9 +60,15 @@ import java.util.concurrent.atomic.AtomicLong; * 0 row(s) in 0.0050 seconds *

    */ -public class WriteSinkCoprocessor implements RegionObserver { +public class WriteSinkCoprocessor implements RegionCoprocessor, RegionObserver { private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class); private final AtomicLong ops = new AtomicLong(); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + private String regionName; @Override @@ -68,7 +76,6 @@ public class WriteSinkCoprocessor implements RegionObserver { regionName = e.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(); } - @Override public void preBatchMutate(final ObserverContext c, final MiniBatchOperationInProgress miniBatchOp) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 682709ee67c..cfe4d1fa4ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; @@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @@ -159,13 +161,18 @@ public class HConnectionTestingUtility { /** * This coproceesor sleep 2s at first increment/append rpc call. 
*/ - public static class SleepAtFirstRpcCall implements RegionObserver { + public static class SleepAtFirstRpcCall implements RegionCoprocessor, RegionObserver { static final AtomicLong ct = new AtomicLong(0); static final String SLEEP_TIME_CONF_KEY = "hbase.coprocessor.SleepAtFirstRpcCall.sleepTime"; static final long DEFAULT_SLEEP_TIME = 2000; static final AtomicLong sleepTime = new AtomicLong(DEFAULT_SLEEP_TIME); + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + public SleepAtFirstRpcCall() { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java index 350bf6e406b..05324aa8ef4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.java @@ -24,6 +24,7 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -35,6 +36,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -170,10 +172,15 @@ public class TestAsyncAdminBuilder { } } - public static class TestRpcTimeoutCoprocessor implements MasterObserver { + public static class TestRpcTimeoutCoprocessor implements MasterCoprocessor, MasterObserver { public TestRpcTimeoutCoprocessor() { } + + @Override + public 
Optional getMasterObserver() { + return Optional.of(this); + } @Override public void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) throws IOException { @@ -181,12 +188,17 @@ public class TestAsyncAdminBuilder { } } - public static class TestOperationTimeoutCoprocessor implements MasterObserver { + public static class TestOperationTimeoutCoprocessor implements MasterCoprocessor, MasterObserver { AtomicLong sleepTime = new AtomicLong(0); public TestOperationTimeoutCoprocessor() { } + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) throws IOException { @@ -197,12 +209,17 @@ public class TestAsyncAdminBuilder { } } - public static class TestMaxRetriesCoprocessor implements MasterObserver { + public static class TestMaxRetriesCoprocessor implements MasterCoprocessor, MasterObserver { AtomicLong retryNum = new AtomicLong(0); public TestMaxRetriesCoprocessor() { } + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 70df318a9ba..efa2c1e3958 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; @@ -38,6 
+39,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.InternalScanner; @@ -73,7 +75,12 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit { private static AtomicInteger MAX_CONCURRENCY = new AtomicInteger(0); - public static final class CountingRegionObserver implements RegionObserver { + public static final class CountingRegionObserver implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public RegionScanner preScannerOpen(ObserverContext e, Scan scan, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java index e4c343a1657..8a341b66214 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java @@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import 
org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; @@ -65,7 +67,11 @@ public class TestAsyncRegionLocatorTimeout { private static volatile long SLEEP_MS = 0L; - public static class SleepRegionObserver implements RegionObserver { + public static class SleepRegionObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public RegionScanner preScannerOpen(ObserverContext e, Scan scan, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java index 6c9dd86c619..fce904154cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java @@ -28,6 +28,7 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ForkJoinPool; @@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -202,7 +204,12 @@ public class TestAsyncTableBatch { assertEquals(4, Bytes.toInt(appendValue, 8)); } - public static final class ErrorInjectObserver implements RegionObserver { + public static final class ErrorInjectObserver implements RegionCoprocessor, RegionObserver { + + 
@Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public void preGetOp(ObserverContext e, Get get, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index b389d9e6cba..30fe7318c25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Optional; import java.util.OptionalInt; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -38,6 +39,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.hfile.BlockCache; @@ -253,7 +255,12 @@ public class TestAvoidCellReferencesIntoShippedBlocks { } } - public static class CompactorRegionObserver implements RegionObserver { + public static class CompactorRegionObserver implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public InternalScanner preCompactScannerOpen(ObserverContext c, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java 
index e1b31e776e6..d558307e430 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.hfile.BlockCache; @@ -1549,7 +1551,7 @@ public class TestBlockEvictionFromClient { } } - public static class CustomInnerRegionObserver implements RegionObserver { + public static class CustomInnerRegionObserver implements RegionCoprocessor, RegionObserver { static final AtomicLong sleepTime = new AtomicLong(0); static final AtomicBoolean slowDownNext = new AtomicBoolean(false); static final AtomicInteger countOfNext = new AtomicInteger(0); @@ -1559,6 +1561,11 @@ public class TestBlockEvictionFromClient { private static final AtomicReference cdl = new AtomicReference<>( new CountDownLatch(0)); + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public boolean postScannerNext(ObserverContext e, InternalScanner s, List results, int limit, boolean hasMore) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java index 62ceca3d8b8..e92ba234533 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -45,6 +46,7 @@ import java.io.InterruptedIOException; import java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; @Category({MediumTests.class, ClientTests.class}) @@ -58,7 +60,12 @@ public class TestClientOperationInterrupt { private static final byte[] test = Bytes.toBytes("test"); private static Configuration conf; - public static class TestCoprocessor implements RegionObserver { + public static class TestCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java index 379ab312ea5..6b0359439df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java @@ -21,6 +21,7 @@ 
package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import org.apache.commons.logging.Log; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -189,10 +191,15 @@ public class TestEnableTable { } } - public static class MasterSyncObserver implements MasterObserver { + public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { volatile CountDownLatch tableCreationLatch = null; volatile CountDownLatch tableDeletionLatch = null; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void postCompletedCreateTableAction( final ObserverContext ctx, @@ -222,8 +229,8 @@ public class TestEnableTable { throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. 
- MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); observer.tableCreationLatch = new CountDownLatch(1); Admin admin = testUtil.getAdmin(); if (splitKeys != null) { @@ -240,8 +247,8 @@ public class TestEnableTable { throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); observer.tableDeletionLatch = new CountDownLatch(1); Admin admin = testUtil.getAdmin(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 10169aba26a..a938db6a649 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -40,6 +40,7 @@ import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.NavigableSet; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; @@ -74,6 +75,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.exceptions.ScannerResetException; @@ -543,7 +545,7 @@ public class TestFromClientSide { * This is a coprocessor to inject a test failure so that a store scanner.reseek() call will * fail with an IOException() on the first call. */ - public static class ExceptionInReseekRegionObserver implements RegionObserver { + public static class ExceptionInReseekRegionObserver implements RegionCoprocessor, RegionObserver { static AtomicLong reqCount = new AtomicLong(0); static AtomicBoolean isDoNotRetry = new AtomicBoolean(false); // whether to throw DNRIOE static AtomicBoolean throwOnce = new AtomicBoolean(true); // whether to only throw once @@ -554,6 +556,11 @@ public class TestFromClientSide { throwOnce.set(true); } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + class MyStoreScanner extends StoreScanner { public MyStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet columns, long readPt) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 89ea5b790a5..ca0a5ea06e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Coprocessor; import 
org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -688,7 +690,7 @@ public class TestFromClientSide3 { private void testPreBatchMutate(TableName tableName, Runnable rn)throws Exception { HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addCoprocessor(WatiingForScanObserver.class.getName()); + desc.addCoprocessor(WaitingForScanObserver.class.getName()); desc.addFamily(new HColumnDescriptor(FAMILY)); TEST_UTIL.getAdmin().createTable(desc); ExecutorService service = Executors.newFixedThreadPool(2); @@ -720,7 +722,7 @@ public class TestFromClientSide3 { public void testLockLeakWithDelta() throws Exception, Throwable { final TableName tableName = TableName.valueOf(name.getMethodName()); HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addCoprocessor(WatiingForMultiMutationsObserver.class.getName()); + desc.addCoprocessor(WaitingForMultiMutationsObserver.class.getName()); desc.setConfiguration("hbase.rowlock.wait.duration", String.valueOf(5000)); desc.addFamily(new HColumnDescriptor(FAMILY)); TEST_UTIL.getAdmin().createTable(desc); @@ -735,7 +737,7 @@ public class TestFromClientSide3 { try (Table table = con.getTable(tableName)) { Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, VALUE); - // the put will be blocked by WatiingForMultiMutationsObserver. + // the put will be blocked by WaitingForMultiMutationsObserver. 
table.put(put); } catch (IOException ex) { throw new RuntimeException(ex); @@ -753,7 +755,7 @@ public class TestFromClientSide3 { }); appendService.shutdown(); appendService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - WatiingForMultiMutationsObserver observer = find(tableName, WatiingForMultiMutationsObserver.class); + WaitingForMultiMutationsObserver observer = find(tableName, WaitingForMultiMutationsObserver.class); observer.latch.countDown(); putService.shutdown(); putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); @@ -774,7 +776,7 @@ public class TestFromClientSide3 { final TableName tableName = TableName.valueOf(name.getMethodName()); HTableDescriptor desc = new HTableDescriptor(tableName); desc.addCoprocessor(MultiRowMutationEndpoint.class.getName()); - desc.addCoprocessor(WatiingForMultiMutationsObserver.class.getName()); + desc.addCoprocessor(WaitingForMultiMutationsObserver.class.getName()); desc.setConfiguration("hbase.rowlock.wait.duration", String.valueOf(5000)); desc.addFamily(new HColumnDescriptor(FAMILY)); TEST_UTIL.getAdmin().createTable(desc); @@ -793,7 +795,7 @@ public class TestFromClientSide3 { try (Table table = con.getTable(tableName)) { Put put0 = new Put(rowLocked); put0.addColumn(FAMILY, QUALIFIER, value0); - // the put will be blocked by WatiingForMultiMutationsObserver. + // the put will be blocked by WaitingForMultiMutationsObserver. 
table.put(put0); } catch (IOException ex) { throw new RuntimeException(ex); @@ -830,7 +832,7 @@ public class TestFromClientSide3 { }); cpService.shutdown(); cpService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); - WatiingForMultiMutationsObserver observer = find(tableName, WatiingForMultiMutationsObserver.class); + WaitingForMultiMutationsObserver observer = find(tableName, WaitingForMultiMutationsObserver.class); observer.latch.countDown(); putService.shutdown(); putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS); @@ -975,8 +977,15 @@ public class TestFromClientSide3 { return clz.cast(cp); } - public static class WatiingForMultiMutationsObserver implements RegionObserver { + public static class WaitingForMultiMutationsObserver + implements RegionCoprocessor, RegionObserver { final CountDownLatch latch = new CountDownLatch(1); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postBatchMutate(final ObserverContext c, final MiniBatchOperationInProgress miniBatchOp) throws IOException { @@ -988,8 +997,14 @@ public class TestFromClientSide3 { } } - public static class WatiingForScanObserver implements RegionObserver { + public static class WaitingForScanObserver implements RegionCoprocessor, RegionObserver { private final CountDownLatch latch = new CountDownLatch(1); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postBatchMutate(final ObserverContext c, final MiniBatchOperationInProgress miniBatchOp) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 56c8c7c97c7..1a674575229 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -31,6 +31,7 @@ import java.lang.reflect.Modifier; import 
java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; @@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; @@ -118,7 +120,7 @@ public class TestHCM { /** * This copro sleeps 20 second. The first call it fails. The second time, it works. */ - public static class SleepAndFailFirstTime implements RegionObserver { + public static class SleepAndFailFirstTime implements RegionCoprocessor, RegionObserver { static final AtomicLong ct = new AtomicLong(0); static final String SLEEP_TIME_CONF_KEY = "hbase.coprocessor.SleepAndFailFirstTime.sleepTime"; @@ -128,6 +130,11 @@ public class TestHCM { public SleepAndFailFirstTime() { } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postOpen(ObserverContext c) { RegionCoprocessorEnvironment env = c.getEnvironment(); @@ -175,8 +182,14 @@ public class TestHCM { } - public static class SleepCoprocessor implements RegionObserver { + public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver { public static final int SLEEP_TIME = 5000; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { @@ -204,9 +217,15 @@ public class TestHCM { } - public static class SleepLongerAtFirstCoprocessor implements RegionObserver { + public static class 
SleepLongerAtFirstCoprocessor implements RegionCoprocessor, RegionObserver { public static final int SLEEP_TIME = 2000; static final AtomicLong ct = new AtomicLong(0); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java index b938f7e16d8..1745c8201ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; import java.io.InterruptedIOException; +import java.util.Optional; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -28,6 +29,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner; @@ -137,7 +139,13 @@ public class TestMobCloneSnapshotFromClient extends TestCloneSnapshotFromClient /** * This coprocessor is used to delay the flush. 
*/ - public static class DelayFlushCoprocessor implements RegionObserver { + public static class DelayFlushCoprocessor implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preFlush(ObserverContext e) throws IOException { if (delayFlush) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index 264c646b895..d580b42084a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; @@ -83,13 +85,18 @@ public class TestReplicaWithCluster { /** * This copro is used to synchronize the tests. 
*/ - public static class SlowMeCopro implements RegionObserver { + public static class SlowMeCopro implements RegionCoprocessor, RegionObserver { static final AtomicLong sleepTime = new AtomicLong(0); static final AtomicReference cdl = new AtomicReference<>(new CountDownLatch(0)); public SlowMeCopro() { } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { @@ -119,11 +126,16 @@ public class TestReplicaWithCluster { /** * This copro is used to simulate region server down exception for Get and Scan */ - public static class RegionServerStoppedCopro implements RegionObserver { + public static class RegionServerStoppedCopro implements RegionCoprocessor, RegionObserver { public RegionServerStoppedCopro() { } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { @@ -164,10 +176,16 @@ public class TestReplicaWithCluster { /** * This copro is used to slow down the primary meta region scan a bit */ - public static class RegionServerHostingPrimayMetaRegionSlowOrStopCopro implements RegionObserver { + public static class RegionServerHostingPrimayMetaRegionSlowOrStopCopro + implements RegionCoprocessor, RegionObserver { static boolean slowDownPrimaryMetaScan = false; static boolean throwException = false; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index 46c3f0dd600..1a3cfbff0d7 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Optional; import java.util.Random; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -98,7 +100,7 @@ public class TestReplicasClient { /** * This copro is used to synchronize the tests. 
*/ - public static class SlowMeCopro implements RegionObserver { + public static class SlowMeCopro implements RegionCoprocessor, RegionObserver { static final AtomicLong sleepTime = new AtomicLong(0); static final AtomicBoolean slowDownNext = new AtomicBoolean(false); static final AtomicInteger countOfNext = new AtomicInteger(0); @@ -108,6 +110,11 @@ public class TestReplicasClient { public SlowMeCopro() { } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java index 4425fb2a873..1fb0f6458ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultFromCoprocessor.java @@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.Arrays; +import java.util.Optional; + import static junit.framework.TestCase.assertTrue; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -29,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -101,7 +104,11 @@ public class TestResultFromCoprocessor { } } - public static class MyObserver implements RegionObserver { + public static class MyObserver implements RegionCoprocessor, RegionObserver { + @Override + public 
Optional getRegionObserver() { + return Optional.of(this); + } @Override public Result postAppend(final ObserverContext c, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java index aba5b071a17..b1126e59f5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestServerBusyException.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; @@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.ipc.ServerTooBusyException; @@ -66,8 +68,13 @@ public class TestServerBusyException { @Rule public TestName name = new TestName(); - public static class SleepCoprocessor implements RegionObserver { + public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver { public static final int SLEEP_TIME = 5000; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { @@ -95,9 +102,15 @@ public class TestServerBusyException { } - public static class SleepLongerAtFirstCoprocessor implements RegionObserver { + public static class SleepLongerAtFirstCoprocessor implements RegionCoprocessor, RegionObserver { public static final int 
SLEEP_TIME = 2000; static final AtomicLong ct = new AtomicLong(0); + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preGetOp(final ObserverContext e, final Get get, final List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java similarity index 92% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java index e338941c4e1..6dbd04f0a9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALCoprocessor.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -39,9 +40,10 @@ import org.apache.hadoop.hbase.wal.WALKey; * passed-in WALEdit, i.e, ignore specified columns when writing, or add a KeyValue. On the other * side, it checks whether the ignored column is still in WAL when Restoreed at region reconstruct. 
*/ -public class SampleRegionWALObserver implements WALObserver, RegionObserver { +public class SampleRegionWALCoprocessor implements WALCoprocessor, RegionCoprocessor, + WALObserver, RegionObserver { - private static final Log LOG = LogFactory.getLog(SampleRegionWALObserver.class); + private static final Log LOG = LogFactory.getLog(SampleRegionWALCoprocessor.class); private byte[] tableName; private byte[] row; @@ -81,6 +83,15 @@ public class SampleRegionWALObserver implements WALObserver, RegionObserver { postWALRollCalled = false; } + @Override public Optional getWALObserver() { + return Optional.of(this); + } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postWALWrite(ObserverContext env, RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { @@ -167,13 +178,13 @@ public class SampleRegionWALObserver implements WALObserver, RegionObserver { } public boolean isPreWALRestoreCalled() { - LOG.debug(SampleRegionWALObserver.class.getName() + + LOG.debug(SampleRegionWALCoprocessor.class.getName() + ".isPreWALRestoreCalled is called."); return preWALRestoreCalled; } public boolean isPostWALRestoreCalled() { - LOG.debug(SampleRegionWALObserver.class.getName() + + LOG.debug(SampleRegionWALCoprocessor.class.getName() + ".isPostWALRestoreCalled is called."); return postWALRestoreCalled; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 5868bf9a0bd..727fa398c08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Optional; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -72,7 +73,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList; * A sample region observer that tests the RegionObserver interface. * It works with TestRegionObserverInterface to provide the test case. */ -public class SimpleRegionObserver implements RegionObserver { +public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver { final AtomicInteger ctBeforeDelete = new AtomicInteger(1); final AtomicInteger ctPreOpen = new AtomicInteger(0); @@ -134,6 +135,11 @@ public class SimpleRegionObserver implements RegionObserver { throwOnPostFlush.set(val); } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void start(CoprocessorEnvironment e) throws IOException { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java index 1102cf84283..6213e86bf5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java @@ -20,13 +20,13 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import static org.mockito.Mockito.*; import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HRegionInfo; @@ -74,7 +74,8 @@ public class TestCoprocessorConfiguration { private static final AtomicBoolean systemCoprocessorLoaded = new AtomicBoolean(); private static final AtomicBoolean 
tableCoprocessorLoaded = new AtomicBoolean(); - public static class SystemCoprocessor implements Coprocessor { + public static class SystemCoprocessor implements MasterCoprocessor, RegionCoprocessor, + RegionServerCoprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { systemCoprocessorLoaded.set(true); @@ -84,7 +85,7 @@ public class TestCoprocessorConfiguration { public void stop(CoprocessorEnvironment env) throws IOException { } } - public static class TableCoprocessor implements Coprocessor { + public static class TableCoprocessor implements RegionCoprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { tableCoprocessorLoaded.set(true); @@ -108,7 +109,7 @@ public class TestCoprocessorConfiguration { systemCoprocessorLoaded.get(), CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); assertEquals("Table coprocessors loading default was not honored", - tableCoprocessorLoaded.get(), + tableCoprocessorLoaded.get(), CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED && CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java index f0915e01fff..03cae78e045 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java @@ -39,7 +39,7 @@ public class TestCoprocessorHost { /** * An {@link Abortable} implementation for tests. 
*/ - class TestAbortable implements Abortable { + private class TestAbortable implements Abortable { private volatile boolean aborted = false; @Override @@ -56,13 +56,23 @@ public class TestCoprocessorHost { @Test public void testDoubleLoadingAndPriorityValue() { final Configuration conf = HBaseConfiguration.create(); - CoprocessorHost host = - new CoprocessorHost(new TestAbortable()) { - final Configuration cpHostConf = conf; + CoprocessorHost> host = + new CoprocessorHost>( + new TestAbortable()) { + @Override + public RegionCoprocessor checkAndGetInstance(Class implClass) + throws InstantiationException, IllegalAccessException { + if(RegionCoprocessor.class.isAssignableFrom(implClass)) { + return (RegionCoprocessor)implClass.newInstance(); + } + return null; + } + + final Configuration cpHostConf = conf; @Override - public CoprocessorEnvironment createEnvironment(Class implClass, - final Coprocessor instance, final int priority, int sequence, Configuration conf) { + public CoprocessorEnvironment createEnvironment(final RegionCoprocessor instance, + final int priority, int sequence, Configuration conf) { return new CoprocessorEnvironment() { final Coprocessor envInstance = instance; @@ -106,6 +116,12 @@ public class TestCoprocessorHost { return null; } + @Override + public void startup() throws IOException {} + + @Override + public void shutdown() {} + @Override public ClassLoader getClassLoader() { return null; @@ -116,13 +132,16 @@ public class TestCoprocessorHost { final String key = "KEY"; final String coprocessor = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; // Try and load a coprocessor three times - conf.setStrings(key, coprocessor, coprocessor, coprocessor, SimpleRegionObserverV2.class.getName()); + conf.setStrings(key, coprocessor, coprocessor, coprocessor, + SimpleRegionObserverV2.class.getName()); host.loadSystemCoprocessors(conf, key); // Two coprocessors(SimpleRegionObserver and SimpleRegionObserverV2) loaded - Assert.assertEquals(2, 
host.coprocessors.size()); + Assert.assertEquals(2, host.coprocEnvironments.size()); // Check the priority value - CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment(SimpleRegionObserver.class.getName()); - CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName()); + CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment( + SimpleRegionObserver.class.getName()); + CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment( + SimpleRegionObserverV2.class.getName()); assertNotNull(simpleEnv); assertNotNull(simpleEnv_v2); assertEquals(Coprocessor.PRIORITY_SYSTEM, simpleEnv.getPriority()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index 5cf0bb378e0..3ff0f1f7463 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -32,6 +32,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; @@ -62,7 +63,6 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Rule; @@ -149,7 +149,7 @@ public class TestCoprocessorInterface { } } - public static class CoprocessorImpl implements RegionObserver { + public static class 
CoprocessorImpl implements RegionCoprocessor, RegionObserver { private boolean startCalled; private boolean stopCalled; @@ -177,6 +177,11 @@ public class TestCoprocessorInterface { stopCalled = true; } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preOpen(ObserverContext e) { preOpenCalled = true; @@ -242,23 +247,31 @@ public class TestCoprocessorInterface { } } - public static class CoprocessorII implements RegionObserver { + public static class CoprocessorII implements RegionCoprocessor { private ConcurrentMap sharedData; + @Override public void start(CoprocessorEnvironment e) { sharedData = ((RegionCoprocessorEnvironment)e).getSharedData(); sharedData.putIfAbsent("test2", new Object()); } + @Override public void stop(CoprocessorEnvironment e) { sharedData = null; } + @Override - public void preGetOp(final ObserverContext e, - final Get get, final List results) throws IOException { - if (1/0 == 1) { - e.complete(); - } + public Optional getRegionObserver() { + return Optional.of(new RegionObserver() { + @Override + public void preGetOp(final ObserverContext e, + final Get get, final List results) throws IOException { + if (1/0 == 1) { + e.complete(); + } + } + }); } Map getSharedData() { @@ -272,8 +285,7 @@ public class TestCoprocessorInterface { byte [][] families = { fam1, fam2, fam3 }; Configuration hc = initConfig(); - Region region = initHRegion(tableName, name.getMethodName(), hc, - new Class[]{}, families); + Region region = initHRegion(tableName, name.getMethodName(), hc, new Class[]{}, families); for (int i = 0; i < 3; i++) { HBaseTestCase.addContent(region, fam3); @@ -284,18 +296,16 @@ public class TestCoprocessorInterface { region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class); - Coprocessor c = region.getCoprocessorHost(). - findCoprocessor(CoprocessorImpl.class.getName()); - Coprocessor c2 = region.getCoprocessorHost(). 
- findCoprocessor(CoprocessorII.class.getName()); + Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); + Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class); Object o = ((CoprocessorImpl)c).getSharedData().get("test1"); Object o2 = ((CoprocessorII)c2).getSharedData().get("test2"); assertNotNull(o); assertNotNull(o2); // to coprocessors get different sharedDatas assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData()); - c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName()); - c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()); + c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); + c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class); // make sure that all coprocessor of a class have identical sharedDatas assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2); @@ -312,21 +322,18 @@ public class TestCoprocessorInterface { fail(); } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) { } - assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName())); - c = region.getCoprocessorHost(). - findCoprocessor(CoprocessorImpl.class.getName()); + assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class)); + c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); c = c2 = null; // perform a GC System.gc(); // reopen the region region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class); - c = region.getCoprocessorHost(). 
- findCoprocessor(CoprocessorImpl.class.getName()); + c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); // CPimpl is unaffected, still the same reference assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o); - c2 = region.getCoprocessorHost(). - findCoprocessor(CoprocessorII.class.getName()); + c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class); // new map and object created, hence the reference is different // hence the old entry was indeed removed by the GC and new one has been created Object o3 = ((CoprocessorII)c2).getSharedData().get("test2"); @@ -357,8 +364,7 @@ public class TestCoprocessorInterface { scanner.next(new ArrayList<>()); HBaseTestingUtility.closeRegionAndWAL(region); - Coprocessor c = region.getCoprocessorHost(). - findCoprocessor(CoprocessorImpl.class.getName()); + Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class); assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted()); assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped()); @@ -382,7 +388,7 @@ public class TestCoprocessorInterface { ((HRegion)r).setCoprocessorHost(host); for (Class implClass : implClasses) { - host.load(implClass, Coprocessor.PRIORITY_USER, conf); + host.load((Class) implClass, Coprocessor.PRIORITY_USER, conf); } // we need to manually call pre- and postOpen here since the // above load() is not the real case for CP loading. 
A CP is @@ -412,7 +418,7 @@ public class TestCoprocessorInterface { ((HRegion)r).setCoprocessorHost(host); for (Class implClass : implClasses) { - host.load(implClass, Coprocessor.PRIORITY_USER, conf); + host.load((Class) implClass, Coprocessor.PRIORITY_USER, conf); Coprocessor c = host.findCoprocessor(implClass.getName()); assertNotNull(c); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java index b4e10d9e488..fbcd1d5dc1e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java @@ -95,7 +95,7 @@ public class TestCoprocessorMetrics { /** * MasterObserver that has a Timer metric for create table operation. */ - public static class CustomMasterObserver implements MasterObserver { + public static class CustomMasterObserver implements MasterCoprocessor, MasterObserver { private Timer createTableTimer; private long start = Long.MIN_VALUE; @@ -125,14 +125,25 @@ public class TestCoprocessorMetrics { createTableTimer = registry.timer("CreateTable"); } } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } } /** * RegionServerObserver that has a Counter for rollWAL requests. 
*/ - public static class CustomRegionServerObserver implements RegionServerObserver { + public static class CustomRegionServerObserver implements RegionServerCoprocessor, + RegionServerObserver { /** This is the Counter metric object to keep track of the current count across invocations */ private Counter rollWALCounter; + + @Override public Optional getRegionServerObserver() { + return Optional.of(this); + } + @Override public void postRollWALWriterRequest(ObserverContext ctx) throws IOException { @@ -156,7 +167,7 @@ public class TestCoprocessorMetrics { /** * WALObserver that has a Counter for walEdits written. */ - public static class CustomWALObserver implements WALObserver { + public static class CustomWALObserver implements WALCoprocessor, WALObserver { private Counter walEditsCount; @Override @@ -177,12 +188,16 @@ public class TestCoprocessorMetrics { } } } + + @Override public Optional getWALObserver() { + return Optional.of(this); + } } /** * RegionObserver that has a Counter for preGet() */ - public static class CustomRegionObserver implements RegionObserver { + public static class CustomRegionObserver implements RegionCoprocessor, RegionObserver { private Counter preGetCounter; @Override @@ -191,6 +206,11 @@ public class TestCoprocessorMetrics { preGetCounter.increment(); } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java index 2ef13f77d86..1d8acdfe3ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java @@ -49,7 +49,7 @@ public class TestCoprocessorStop { private 
static final String REGIONSERVER_FILE = "regionserver" + System.currentTimeMillis(); - public static class FooCoprocessor implements Coprocessor { + public static class FooCoprocessor implements MasterCoprocessor, RegionServerCoprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { String where = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java index 326b3c0a1db..b3fdb3eb632 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; @@ -87,7 +88,11 @@ public class TestHTableWrapper { private static final byte[] bytes4 = Bytes.toBytes(4); private static final byte[] bytes5 = Bytes.toBytes(5); - static class DummyRegionObserver implements RegionObserver { + public static class DummyRegionObserver implements MasterCoprocessor, MasterObserver { + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } } private Table hTableInterface; @@ -135,14 +140,14 @@ public class TestHTableWrapper { public void testHTableInterfaceMethods() throws Exception { Configuration conf = util.getConfiguration(); MasterCoprocessorHost cpHost = util.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); - Class implClazz = DummyRegionObserver.class; + Class implClazz = DummyRegionObserver.class; cpHost.load(implClazz, Coprocessor.PRIORITY_HIGHEST, conf); CoprocessorEnvironment env = cpHost.findCoprocessorEnvironment(implClazz.getName()); assertEquals(Coprocessor.VERSION, env.getVersion()); 
assertEquals(VersionInfo.getVersion(), env.getHBaseVersion()); hTableInterface = env.getTable(TEST_TABLE); checkHTableInterfaceMethods(); - cpHost.shutdown(env); + cpHost.shutdown((MasterCoprocessorEnvironment) env); } private void checkHTableInterfaceMethods() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java index ab3dec71f41..0595a677116 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -97,12 +98,17 @@ public class TestMasterCoprocessorExceptionWithAbort { } } - public static class BuggyMasterObserver implements MasterObserver { + public static class BuggyMasterObserver implements MasterCoprocessor, MasterObserver { private boolean preCreateTableCalled; private boolean postCreateTableCalled; private boolean startCalled; private boolean postStartMasterCalled; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void postCreateTable(ObserverContext env, TableDescriptor desc, RegionInfo[] regions) throws IOException { @@ -163,8 +169,7 @@ public class TestMasterCoprocessorExceptionWithAbort { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - BuggyMasterObserver cp = (BuggyMasterObserver)host.findCoprocessor( - BuggyMasterObserver.class.getName()); + BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class); assertFalse("No table created yet", 
cp.wasCreateTableCalled()); // set a watch on the zookeeper /hbase/master node. If the master dies, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java index ccd777f9c6d..d4c6e4f8b59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -73,12 +74,17 @@ public class TestMasterCoprocessorExceptionWithRemove { } } - public static class BuggyMasterObserver implements MasterObserver { + public static class BuggyMasterObserver implements MasterCoprocessor, MasterObserver { private boolean preCreateTableCalled; private boolean postCreateTableCalled; private boolean startCalled; private boolean postStartMasterCalled; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @SuppressWarnings("null") @Override public void postCreateTable(ObserverContext env, @@ -144,8 +150,7 @@ public class TestMasterCoprocessorExceptionWithRemove { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - BuggyMasterObserver cp = (BuggyMasterObserver)host.findCoprocessor( - BuggyMasterObserver.class.getName()); + BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class); assertFalse("No table created yet", cp.wasCreateTableCalled()); // Set a watch on the zookeeper /hbase/master node. 
If the master dies, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 1e36b0bbbca..b038d9d1b4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -95,7 +96,7 @@ public class TestMasterObserver { public static CountDownLatch tableCreationLatch = new CountDownLatch(1); public static CountDownLatch tableDeletionLatch = new CountDownLatch(1); - public static class CPMasterObserver implements MasterObserver { + public static class CPMasterObserver implements MasterCoprocessor, MasterObserver { private boolean bypass = false; private boolean preCreateTableCalled; @@ -282,6 +283,11 @@ public class TestMasterObserver { postLockHeartbeatCalled = false; } + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preMergeRegions( final ObserverContext ctx, @@ -1503,8 +1509,7 @@ public class TestMasterObserver { assertTrue("Master should be active", master.isActiveMaster()); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); assertNotNull("CoprocessorHost should not be null", host); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); assertNotNull("CPMasterObserver coprocessor not found or not installed!", cp); // check basic lifecycle @@ -1521,8 +1526,7 @@ public class TestMasterObserver { final TableName tableName = TableName.valueOf(name.getMethodName()); HMaster master = cluster.getMaster(); 
MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.enableBypass(true); cp.resetStates(); assertFalse("No table created yet", cp.wasCreateTableCalled()); @@ -1698,8 +1702,7 @@ public class TestMasterObserver { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); // create a table @@ -1760,8 +1763,7 @@ public class TestMasterObserver { String testNamespace = "observed_ns"; HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.enableBypass(false); cp.resetStates(); @@ -1866,8 +1868,7 @@ public class TestMasterObserver { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.enableBypass(false); cp.resetStates(); @@ -1955,8 +1956,7 @@ public class TestMasterObserver { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); GetTableDescriptorsRequest req = @@ -1973,8 +1973,7 @@ public class TestMasterObserver { HMaster master = 
cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); master.getMasterRpcServices().getTableNames(null, @@ -1989,8 +1988,7 @@ public class TestMasterObserver { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); master.abortProcedure(1, true); @@ -2005,8 +2003,7 @@ public class TestMasterObserver { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); master.getProcedures(); @@ -2021,8 +2018,7 @@ public class TestMasterObserver { HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); - CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class); cp.resetStates(); master.getLocks(); @@ -2043,8 +2039,7 @@ public class TestMasterObserver { @Test public void testQueueLockAndLockHeartbeatOperations() throws Exception { HMaster master = UTIL.getMiniHBaseCluster().getMaster(); - CPMasterObserver cp = (CPMasterObserver)master.getMasterCoprocessorHost().findCoprocessor( - CPMasterObserver.class.getName()); + CPMasterObserver cp = master.getMasterCoprocessorHost().findCoprocessor(CPMasterObserver.class); cp.resetStates(); final TableName tableName = TableName.valueOf("testLockedTable"); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java index 5cd1fca37d9..dabf20bc94c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java @@ -41,6 +41,7 @@ import org.junit.experimental.categories.Category; import java.io.IOException; import java.util.Collections; +import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; @@ -63,7 +64,12 @@ public class TestOpenTableInCoprocessor { /** * Custom coprocessor that just copies the write to another table. */ - public static class SendToOtherTableCoprocessor implements RegionObserver { + public static class SendToOtherTableCoprocessor implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public void prePut(final ObserverContext e, final Put put, @@ -80,7 +86,7 @@ public class TestOpenTableInCoprocessor { /** * Coprocessor that creates an HTable with a pool to write to another table */ - public static class CustomThreadPoolCoprocessor implements RegionObserver { + public static class CustomThreadPoolCoprocessor implements RegionCoprocessor, RegionObserver { /** * Get a pool that has only ever one thread. 
A second action added to the pool (running @@ -97,6 +103,11 @@ public class TestOpenTableInCoprocessor { return pool; } + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java index 8d554301b2c..050ea36fde7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverBypass.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -204,7 +205,12 @@ public class TestRegionObserverBypass { t.delete(d); } - public static class TestCoprocessor implements RegionObserver { + public static class TestCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java index b168be76a8b..88979576033 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java @@ -21,6 
+21,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -194,7 +195,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors { } } - public static class TestMultiMutationCoprocessor implements RegionObserver { + public static class TestMultiMutationCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp) throws IOException { @@ -211,7 +217,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors { } } - public static class TestDeleteCellCoprocessor implements RegionObserver { + public static class TestDeleteCellCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp) throws IOException { @@ -230,7 +241,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors { } } - public static class TestDeleteFamilyCoprocessor implements RegionObserver { + public static class TestDeleteFamilyCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp) throws IOException { @@ -249,7 +265,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors { } } - public static class TestDeleteRowCoprocessor implements RegionObserver { + public static class TestDeleteRowCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return 
Optional.of(this); + } + @Override public void preBatchMutate(ObserverContext c, MiniBatchOperationInProgress miniBatchOp) throws IOException { @@ -268,8 +289,14 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors { } } - public static class TestWALObserver implements WALObserver { + public static class TestWALObserver implements WALCoprocessor, WALObserver { static WALEdit savedEdit = null; + + @Override + public Optional getWALObserver() { + return Optional.of(this); + } + @Override public void postWALWrite(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 0641b567598..2666340fa3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -411,10 +412,15 @@ public class TestRegionObserverInterface { } /* Overrides compaction to only output rows with keys that are even numbers */ - public static class EvenOnlyCompactor implements RegionObserver { + public static class EvenOnlyCompactor implements RegionCoprocessor, RegionObserver { long lastCompaction; long lastFlush; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public InternalScanner preCompact(ObserverContext e, Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker) { @@ -494,8 +500,7 @@ public class TestRegionObserverInterface { } HRegion firstRegion = 
cluster.getRegions(compactTable).get(0); - Coprocessor cp = - firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName()); + Coprocessor cp = firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class); assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp); EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp; @@ -656,7 +661,7 @@ public class TestRegionObserverInterface { } // check each region whether the coprocessor upcalls are called or not. - private void verifyMethodResult(Class c, String methodName[], TableName tableName, + private void verifyMethodResult(Class coprocessor, String methodName[], TableName tableName, Object value[]) throws IOException { try { for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) { @@ -671,14 +676,14 @@ public class TestRegionObserverInterface { RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost(); - Coprocessor cp = cph.findCoprocessor(c.getName()); + Coprocessor cp = cph.findCoprocessor(coprocessor.getName()); assertNotNull(cp); for (int i = 0; i < methodName.length; ++i) { - Method m = c.getMethod(methodName[i]); + Method m = coprocessor.getMethod(methodName[i]); Object o = m.invoke(cp); - assertTrue("Result of " + c.getName() + "." + methodName[i] + " is expected to be " - + value[i].toString() + ", while we get " + o.toString(), - o.equals(value[i])); + assertTrue("Result of " + coprocessor.getName() + "." 
+ methodName[i] + + " is expected to be " + value[i].toString() + ", while we get " + + o.toString(), o.equals(value[i])); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index 7b24c1fb6c2..4c8c4cedd55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertNull; import java.io.IOException; import java.util.List; import java.util.NavigableSet; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.conf.Configuration; @@ -103,13 +104,22 @@ public class TestRegionObserverScannerOpenHook { /** * Do the default logic in {@link RegionObserver} interface. */ - public static class EmptyRegionObsever implements RegionObserver { + public static class EmptyRegionObsever implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } } /** * Don't return any data from a scan by creating a custom {@link StoreScanner}. */ - public static class NoDataFromScan implements RegionObserver { + public static class NoDataFromScan implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public KeyValueScanner preStoreScannerOpen(ObserverContext c, Store store, Scan scan, NavigableSet targetCols, KeyValueScanner s, long readPt) @@ -137,7 +147,11 @@ public class TestRegionObserverScannerOpenHook { /** * Don't allow any data in a flush by creating a custom {@link StoreScanner}. 
*/ - public static class NoDataFromFlush implements RegionObserver { + public static class NoDataFromFlush implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public InternalScanner preFlushScannerOpen(ObserverContext c, @@ -152,7 +166,12 @@ public class TestRegionObserverScannerOpenHook { * Don't allow any data to be written out in the compaction by creating a custom * {@link StoreScanner}. */ - public static class NoDataFromCompaction implements RegionObserver { + public static class NoDataFromCompaction implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public InternalScanner preCompactScannerOpen(ObserverContext c, Store store, List scanners, ScanType scanType, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java index 702155b8685..1277ccc853f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.util.Optional; import junit.framework.TestCase; @@ -50,8 +51,14 @@ public class TestRegionObserverStacking extends TestCase { = new HBaseTestingUtility(); static final Path DIR = TEST_UTIL.getDataTestDir(); - public static class ObserverA implements RegionObserver { + public static class ObserverA implements RegionCoprocessor, RegionObserver { long id; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postPut(final ObserverContext c, final Put put, final WALEdit edit, @@ -65,8 +72,14 @@ public class TestRegionObserverStacking 
extends TestCase { } } - public static class ObserverB implements RegionObserver { + public static class ObserverB implements RegionCoprocessor, RegionObserver { long id; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postPut(final ObserverContext c, final Put put, final WALEdit edit, @@ -80,9 +93,14 @@ public class TestRegionObserverStacking extends TestCase { } } - public static class ObserverC implements RegionObserver { + public static class ObserverC implements RegionCoprocessor, RegionObserver { long id; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postPut(final ObserverContext c, final Put put, final WALEdit edit, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java index ae460039758..b4644cbad31 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java @@ -119,7 +119,7 @@ public class TestRegionServerCoprocessorExceptionWithAbort { // it will abort. 
boolean aborted = false; for (int i = 0; i < 10; i++) { - aborted = regionServer.isAborted(); + aborted = regionServer.isAborted(); if (aborted) { break; } @@ -137,7 +137,7 @@ public class TestRegionServerCoprocessorExceptionWithAbort { } } - public static class FailedInitializationObserver extends SimpleRegionObserver { + public static class FailedInitializationObserver implements RegionServerCoprocessor { @SuppressWarnings("null") @Override public void start(CoprocessorEnvironment e) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index d701413165e..9e140f02291 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -109,9 +109,9 @@ public class TestWALObserver { public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - SampleRegionWALObserver.class.getName()); + SampleRegionWALCoprocessor.class.getName()); conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - SampleRegionWALObserver.class.getName()); + SampleRegionWALCoprocessor.class.getName()); conf.setInt("dfs.client.block.recovery.retries", 2); TEST_UTIL.startMiniCluster(1); @@ -173,10 +173,10 @@ public class TestWALObserver { @Test public void testWALObserverWriteToWAL() throws Exception { final WAL log = wals.getWAL(UNSPECIFIED_REGION, null); - verifyWritesSeen(log, getCoprocessor(log, SampleRegionWALObserver.class), false); + verifyWritesSeen(log, getCoprocessor(log, SampleRegionWALCoprocessor.class), false); } - private void verifyWritesSeen(final WAL log, final SampleRegionWALObserver cp, + private void verifyWritesSeen(final WAL log, final SampleRegionWALCoprocessor cp, final boolean seesLegacy) throws 
Exception { HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE)); final HTableDescriptor htd = createBasic3FamilyHTD(Bytes @@ -277,7 +277,7 @@ public class TestWALObserver { } WAL log = wals.getWAL(UNSPECIFIED_REGION, null); try { - SampleRegionWALObserver cp = getCoprocessor(log, SampleRegionWALObserver.class); + SampleRegionWALCoprocessor cp = getCoprocessor(log, SampleRegionWALCoprocessor.class); cp.setTestValues(TEST_TABLE, null, null, null, null, null, null, null); @@ -354,9 +354,8 @@ public class TestWALObserver { hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null); long seqid2 = region.getOpenSeqNum(); - SampleRegionWALObserver cp2 = - (SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor( - SampleRegionWALObserver.class.getName()); + SampleRegionWALCoprocessor cp2 = + region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class); // TODO: asserting here is problematic. assertNotNull(cp2); assertTrue(cp2.isPreWALRestoreCalled()); @@ -376,13 +375,13 @@ public class TestWALObserver { @Test public void testWALObserverLoaded() throws Exception { WAL log = wals.getWAL(UNSPECIFIED_REGION, null); - assertNotNull(getCoprocessor(log, SampleRegionWALObserver.class)); + assertNotNull(getCoprocessor(log, SampleRegionWALCoprocessor.class)); } @Test public void testWALObserverRoll() throws Exception { final WAL wal = wals.getWAL(UNSPECIFIED_REGION, null); - final SampleRegionWALObserver cp = getCoprocessor(wal, SampleRegionWALObserver.class); + final SampleRegionWALCoprocessor cp = getCoprocessor(wal, SampleRegionWALCoprocessor.class); cp.setTestValues(TEST_TABLE, null, null, null, null, null, null, null); assertFalse(cp.isPreWALRollCalled()); @@ -393,11 +392,11 @@ public class TestWALObserver { assertTrue(cp.isPostWALRollCalled()); } - private SampleRegionWALObserver getCoprocessor(WAL wal, - Class clazz) throws Exception { + private SampleRegionWALCoprocessor getCoprocessor(WAL wal, + Class clazz) 
throws Exception { WALCoprocessorHost host = wal.getCoprocessorHost(); Coprocessor c = host.findCoprocessor(clazz.getName()); - return (SampleRegionWALObserver) c; + return (SampleRegionWALCoprocessor) c; } /* diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index 8f0c5d6e283..e0d9fa24bbc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -29,6 +29,7 @@ import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; @@ -69,6 +70,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.HFileLink; @@ -719,7 +721,12 @@ public class TestMobCompactor { * This copro overwrites the default compaction policy. It always chooses two latest hfiles and * compacts them into a new one. 
*/ - public static class CompactTwoLatestHfilesCopro implements RegionObserver { + public static class CompactTwoLatestHfilesCopro implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } @Override public void preCompactSelection(ObserverContext c, Store store, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index c3627f7ffef..47e37833057 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.namespace; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -52,11 +53,14 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -278,7 +282,8 @@ 
public class TestNamespaceAuditor { assertNull("Namespace state not found to be null.", stateInfo); } - public static class CPRegionServerObserver implements RegionServerObserver { + public static class CPRegionServerObserver + implements RegionServerCoprocessor, RegionServerObserver { private volatile boolean shouldFailMerge = false; public void failMerge(boolean fail) { @@ -292,15 +297,25 @@ public class TestNamespaceAuditor { wait(); } } + + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } } - public static class CPMasterObserver implements MasterObserver { + public static class CPMasterObserver implements MasterCoprocessor, MasterObserver { private volatile boolean shouldFailMerge = false; public void failMerge(boolean fail) { shouldFailMerge = fail; } + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public synchronized void preMergeRegionsAction( final ObserverContext ctx, @@ -353,7 +368,7 @@ public class TestNamespaceAuditor { // Fail region merge through Coprocessor hook MiniHBaseCluster cluster = UTIL.getHBaseCluster(); MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost(); - Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class.getName()); + Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class); CPMasterObserver masterObserver = (CPMasterObserver) coprocessor; masterObserver.failMerge(true); @@ -445,7 +460,7 @@ public class TestNamespaceAuditor { return Bytes.toBytes("" + key); } - public static class CustomObserver implements RegionObserver { + public static class CustomObserver implements RegionCoprocessor, RegionObserver { volatile CountDownLatch postCompact; @Override @@ -458,6 +473,11 @@ public class TestNamespaceAuditor { public void start(CoprocessorEnvironment e) throws IOException { postCompact = new CountDownLatch(1); } + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + 
} } @Test @@ -522,10 +542,15 @@ public class TestNamespaceAuditor { .getMasterQuotaManager().getNamespaceQuotaManager(); } - public static class MasterSyncObserver implements MasterObserver { + public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { volatile CountDownLatch tableDeletionLatch; static boolean throwExceptionInPreCreateTableAction; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preDeleteTable(ObserverContext ctx, TableName tableName) throws IOException { @@ -551,8 +576,8 @@ public class TestNamespaceAuditor { private void deleteTable(final TableName tableName) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = (MasterSyncObserver)UTIL.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + MasterSyncObserver observer = UTIL.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); ADMIN.deleteTable(tableName); observer.tableDeletionLatch.await(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java index eecc06921c9..20468169f30 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java @@ -21,12 +21,14 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.List; import java.util.NavigableSet; +import java.util.Optional; import java.util.OptionalInt; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import 
org.apache.hadoop.hbase.client.TestFromClientSideWithCoprocessor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; @@ -38,7 +40,12 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTrack * {@link TestCompactionWithCoprocessor} to make sure that a wide range * of functionality still behaves as expected. */ -public class NoOpScanPolicyObserver implements RegionObserver { +public class NoOpScanPolicyObserver implements RegionCoprocessor, RegionObserver { + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } /** * Reimplement the default behavior diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index af64be69a1f..8c604bf4cdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.client.ClientServiceCallable; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Result; @@ -251,8 +252,14 @@ public class TestHRegionServerBulkLoad { } } - public static class MyObserver implements RegionObserver { + public static class MyObserver implements 
RegionCoprocessor, RegionObserver { static int sleepDuration; + + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public InternalScanner preCompact(ObserverContext e, Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java index 9928a7737f1..1e3db70862e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java @@ -36,8 +36,10 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; import org.apache.hadoop.hbase.master.HMaster; @@ -55,6 +57,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import java.io.IOException; +import java.util.Optional; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -160,10 +163,21 @@ public class TestRegionServerAbort { assertFalse(cluster.getRegionServer(0).isStopped()); } - public static class StopBlockingRegionObserver implements RegionServerObserver, RegionObserver { + public static class StopBlockingRegionObserver + implements RegionServerCoprocessor, RegionCoprocessor, 
RegionServerObserver, RegionObserver { public static final String DO_ABORT = "DO_ABORT"; private boolean stopAllowed; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + @Override + public Optional getRegionServerObserver() { + return Optional.of(this); + } + @Override public void prePut(ObserverContext c, Put put, WALEdit edit, Durability durability) throws IOException { @@ -185,10 +199,6 @@ public class TestRegionServerAbort { public void setStopAllowed(boolean allowed) { this.stopAllowed = allowed; } - - public boolean isStopAllowed() { - return stopAllowed; - } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java index 67796d14242..bd63babdf60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRetriableFailure.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -64,9 +66,14 @@ public class TestScannerRetriableFailure { @Rule public TestTableName TEST_TABLE = new TestTableName(); - public static class FaultyScannerObserver implements 
RegionObserver { + public static class FaultyScannerObserver implements RegionCoprocessor, RegionObserver { private int faults = 0; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public boolean preScannerNext(final ObserverContext e, final InternalScanner s, final List results, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java index 6cfff43d222..51fff854cfb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.List; +import java.util.Optional; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.hfile.CorruptHFileException; @@ -65,7 +67,12 @@ public class TestScannerWithCorruptHFile { TEST_UTIL.shutdownMiniCluster(); } - public static class CorruptHFileCoprocessor implements RegionObserver { + public static class CorruptHFileCoprocessor implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public boolean preScannerNext(ObserverContext e, InternalScanner s, List results, int limit, boolean hasMore) throws IOException { 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java index 993a7183a39..1cb6bfc2da4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -68,9 +70,14 @@ public class TestSettingTimeoutOnBlockingPoint { TEST_UTIL.shutdownMiniCluster(); } - public static class SleepCoprocessor implements RegionObserver { + public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver { public static final int SLEEP_TIME = 10000; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public Result preIncrementAfterRowLock(final ObserverContext e, final Increment increment) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index c2c317120f3..57f50b658bc 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -207,8 +208,7 @@ public class TestSplitTransactionOnCluster { // we have to wait until the SPLITTING state is seen by the master FailingSplitMasterObserver observer = - (FailingSplitMasterObserver) master.getMasterCoprocessorHost().findCoprocessor( - FailingSplitMasterObserver.class.getName()); + master.getMasterCoprocessorHost().findCoprocessor(FailingSplitMasterObserver.class); assertNotNull(observer); observer.latch.await(); @@ -269,12 +269,19 @@ public class TestSplitTransactionOnCluster { assertEquals(2, cluster.getRegions(tableName).size()); } - public static class FailingSplitMasterObserver implements MasterObserver { + public static class FailingSplitMasterObserver implements MasterCoprocessor, MasterObserver { volatile CountDownLatch latch; + @Override public void start(CoprocessorEnvironment e) throws IOException { latch = new CountDownLatch(1); } + + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preSplitRegionBeforePONRAction( final ObserverContext ctx, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index ca452b44d36..9a721001712 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -543,11 +545,16 @@ public class TestTags { } } - public static class TestCoprocessorForTags implements RegionObserver { + public static class TestCoprocessorForTags implements RegionCoprocessor, RegionObserver { public static volatile boolean checkTagPresence = false; public static List tags = null; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 6242485fb08..bc66c332989 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import 
org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver; +import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.util.Bytes; @@ -110,7 +110,7 @@ public abstract class AbstractTestFSWAL { TEST_UTIL.getConfiguration().setInt("dfs.client.block.recovery.retries", 1); TEST_UTIL.getConfiguration().setInt("hbase.ipc.client.connection.maxidletime", 500); TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - SampleRegionWALObserver.class.getName()); + SampleRegionWALCoprocessor.class.getName()); TEST_UTIL.startMiniDFSCluster(3); CONF = TEST_UTIL.getConfiguration(); @@ -141,7 +141,7 @@ public abstract class AbstractTestFSWAL { wal = newWAL(FS, FSUtils.getWALRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); WALCoprocessorHost host = wal.getCoprocessorHost(); - Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName()); + Coprocessor c = host.findCoprocessor(SampleRegionWALCoprocessor.class); assertNotNull(c); } finally { if (wal != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java index 64ed2bc0136..2f3e9b94f43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import 
org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver; +import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -101,7 +101,7 @@ public abstract class AbstractTestProtobufLog { TEST_UTIL.getConfiguration().setInt( "hbase.ipc.client.connection.maxidletime", 500); TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - SampleRegionWALObserver.class.getName()); + SampleRegionWALCoprocessor.class.getName()); TEST_UTIL.startMiniDFSCluster(3); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index e3a5b192085..457a5d062d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; import java.util.List; +import java.util.Optional; import java.util.Random; import java.util.concurrent.CountDownLatch; @@ -60,6 +61,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -714,10 +716,15 @@ public class TestMasterReplication { * Use a coprocessor to count puts and deletes. as KVs would be replicated back with the same * timestamp there is otherwise no way to count them. 
*/ - public static class CoprocessorCounter implements RegionObserver { + public static class CoprocessorCounter implements RegionCoprocessor, RegionObserver { private int nCount = 0; private int nDelete = 0; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void prePut(final ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java index 5e7fef1eb5a..e2a393acd87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java @@ -24,6 +24,7 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -53,6 +54,7 @@ import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.wal.WALEdit; @@ -197,7 +199,12 @@ public class TestReplicationWithTags { } } - public static class TestCoprocessorForTagsAtSource implements RegionObserver { + public static class TestCoprocessorForTagsAtSource implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void prePut(final 
ObserverContext e, final Put put, final WALEdit edit, final Durability durability) throws IOException { @@ -230,9 +237,14 @@ public class TestReplicationWithTags { } } - public static class TestCoprocessorForTagsAtSink implements RegionObserver { + public static class TestCoprocessorForTagsAtSink implements RegionCoprocessor, RegionObserver { public static List tags = null; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postGetOp(ObserverContext e, Get get, List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java index ebe4bb1fc8f..1089b7a4e04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication.regionserver; import java.io.IOException; +import java.util.Optional; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicLong; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.WALCoprocessor; import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.WALObserver; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; @@ -148,10 +150,16 @@ public class TestRegionReplicaReplicationEndpointNoMaster { static ConcurrentLinkedQueue entries = new 
ConcurrentLinkedQueue<>(); - public static class WALEditCopro implements WALObserver { + public static class WALEditCopro implements WALCoprocessor, WALObserver { public WALEditCopro() { entries.clear(); } + + @Override + public Optional getWALObserver() { + return Optional.of(this); + } + @Override public void postWALWrite(ObserverContext ctx, RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java index afb14fe0dd5..0a8c5514726 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java @@ -24,6 +24,7 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; @@ -50,6 +51,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -304,8 +306,7 @@ public class SecureTestUtil { List result = Lists.newArrayList(); for (RegionServerThread t: cluster.getLiveRegionServerThreads()) { for (Region region: t.getRegionServer().getOnlineRegionsLocalContext()) { - Coprocessor cp = region.getCoprocessorHost() - .findCoprocessor(AccessController.class.getName()); + Coprocessor cp = 
region.getCoprocessorHost().findCoprocessor(AccessController.class); if (cp != null) { result.add((AccessController)cp); } @@ -622,10 +623,15 @@ public class SecureTestUtil { }); } - public static class MasterSyncObserver implements MasterObserver { + public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { volatile CountDownLatch tableCreationLatch = null; volatile CountDownLatch tableDeletionLatch = null; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void postCompletedCreateTableAction( final ObserverContext ctx, @@ -678,8 +684,8 @@ public class SecureTestUtil { byte[][] splitKeys) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); observer.tableCreationLatch = new CountDownLatch(1); if (splitKeys != null) { admin.createTable(htd, splitKeys); @@ -710,8 +716,8 @@ public class SecureTestUtil { throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. 
- MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class); observer.tableDeletionLatch = new CountDownLatch(1); try { admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index d38c20d6e7f..05776ff5eb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -36,6 +36,7 @@ import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -76,9 +77,9 @@ import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountRequest; @@ -111,7 +112,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; @@ -227,13 +227,11 @@ public class TestAccessController extends SecureTestUtil { MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); - ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); - CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + ACCESS_CONTROLLER = cpHost.findCoprocessor(AccessController.class); + CP_ENV = cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); - RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + RSCP_ENV = rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME); @@ -279,8 +277,7 @@ public class TestAccessController extends SecureTestUtil { Region region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); - RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Set up initial grants @@ -2558,8 +2555,7 @@ 
public class TestAccessController extends SecureTestUtil { } - public static class PingCoprocessor extends PingService implements Coprocessor, - CoprocessorService { + public static class PingCoprocessor extends PingService implements RegionCoprocessor { @Override public void start(CoprocessorEnvironment env) throws IOException { } @@ -2568,8 +2564,8 @@ public class TestAccessController extends SecureTestUtil { public void stop(CoprocessorEnvironment env) throws IOException { } @Override - public Service getService() { - return this; + public Optional getService() { + return Optional.of(this); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java index c39e1192fee..ad8cb140675 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java @@ -485,14 +485,13 @@ public class TestAccessController2 extends SecureTestUtil { MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(MyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); - AccessController ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor( - MyAccessController.class.getName()); + AccessController ACCESS_CONTROLLER = cpHost.findCoprocessor(MyAccessController.class); MasterCoprocessorEnvironment CP_ENV = cpHost.createEnvironment( - MyAccessController.class, ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); + ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); RegionServerCoprocessorEnvironment RSCP_ENV = rsHost.createEnvironment( - MyAccessController.class, ACCESS_CONTROLLER, 
Coprocessor.PRIORITY_HIGHEST, 1, conf); + ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); } @Test (timeout=180000) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java index d8666b6bf79..40865ab20e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java @@ -120,7 +120,7 @@ public class TestAccessController3 extends SecureTestUtil { private static AccessController ACCESS_CONTROLLER; private static RegionServerCoprocessorEnvironment RSCP_ENV; private static RegionCoprocessorEnvironment RCP_ENV; - + private static boolean callSuperTwice = true; @Rule @@ -161,15 +161,13 @@ public class TestAccessController3 extends SecureTestUtil { TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(FaultyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(accessControllerClassName); - CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + CP_ENV = cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost; do { rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); } while (rsHost == null); - RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + RSCP_ENV = rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME); @@ -219,8 +217,7 @@ public class TestAccessController3 extends SecureTestUtil { Region 
region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); - RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Set up initial grants diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index 1570dd65341..f626b07f499 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -110,12 +110,11 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster() .getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); - AccessController ac = (AccessController) - cpHost.findCoprocessor(AccessController.class.getName()); - cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); + AccessController ac = cpHost.findCoprocessor(AccessController.class); + cpHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); - rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); + rsHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java index 9e3976b7b84..3aa97b74977 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java @@ -113,12 +113,11 @@ public class TestCellACLs extends SecureTestUtil { MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster() .getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); - AccessController ac = (AccessController) - cpHost.findCoprocessor(AccessController.class.getName()); - cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); + AccessController ac = cpHost.findCoprocessor(AccessController.class); + cpHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); - rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); + rsHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java index bc74f4f82e0..ac13c8b5153 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.Optional; import org.apache.commons.logging.Log; 
import org.apache.commons.logging.LogFactory; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; @@ -290,7 +292,13 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil { admin.listTables("^" + TEST_TABLE.getNameAsString() + "$")); } - public static class TestRegionObserver implements RegionObserver {} + public static class TestRegionObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + + } /** * Test a table creation including a coprocessor path diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java index dad071ac65a..f15d6a03782 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java @@ -155,8 +155,8 @@ public class TestNamespaceCommands extends SecureTestUtil { // on an arbitrary server. for (JVMClusterUtil.RegionServerThread rst: UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()) { - ACCESS_CONTROLLER = (AccessController)rst.getRegionServer().getRegionServerCoprocessorHost(). - findCoprocessor(AccessController.class.getName()); + ACCESS_CONTROLLER = rst.getRegionServer().getRegionServerCoprocessorHost(). 
+ findCoprocessor(AccessController.class); if (ACCESS_CONTROLLER != null) break; } if (ACCESS_CONTROLLER == null) throw new NullPointerException(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java index a1def954bc1..f60209fff7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java @@ -96,10 +96,10 @@ public class TestScanEarlyTermination extends SecureTestUtil { cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); AccessController ac = (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); - cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); + cpHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); - rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); + rsHost.createEnvironment(ac, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java index 7e61d241c83..fad63a104a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -112,7 +112,7 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { @Rule public 
TestTableName TEST_TABLE = new TestTableName(); // default users - + // superuser private static User SUPERUSER; // user granted with all global permission @@ -153,12 +153,10 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); - CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + CP_ENV = cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) .getRegionServerCoprocessorHost(); - RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, - Coprocessor.PRIORITY_HIGHEST, 1, conf); + RSCP_ENV = rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); // Wait for the ACL table to become available TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME); @@ -193,7 +191,7 @@ public class TestWithDisabledAuthorization extends SecureTestUtil { Region region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); - RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, + RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, TEST_UTIL.getConfiguration()); // Set up initial grants diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 780a8e0baa7..56a66e93a93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -49,7 +49,9 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; import org.apache.hadoop.hbase.ipc.NettyRpcServer; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -261,11 +263,16 @@ public class TestTokenAuthentication { final RegionServerServices mockServices = TEST_UTIL.createMockRegionServerService(rpcServer); // mock up coprocessor environment - super.start(new RegionCoprocessorEnvironment() { + super.start( new RegionCoprocessorEnvironment() { @Override public HRegion getRegion() { return null; } @Override + public void startup() throws IOException {} + + @Override + public void shutdown() {} + public CoprocessorRegionServerServices getCoprocessorRegionServerServices() { return mockServices; } @@ -285,7 +292,7 @@ public class TestTokenAuthentication { public String getHBaseVersion() { return null; } @Override - public Coprocessor getInstance() { return null; } + public RegionCoprocessor getInstance() { return null; } @Override public int getPriority() { return 0; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 13d0e3c48a0..398be48c470 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -44,12 +44,10 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.visibility.VisibilityController.VisibilityReplication; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index 25521935e6c..a3d7507fc88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; @@ -59,6 +60,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; @@ -66,7 +68,6 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.visibility.VisibilityController.VisibilityReplication; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; @@ -393,7 +394,12 @@ public class TestVisibilityLabelsReplication { // A simple BaseRegionbserver impl that allows to add a non-visibility tag from the // attributes of the Put mutation. The existing cells in the put mutation is overwritten // with a new cell that has the visibility tags and the non visibility tag - public static class SimpleCP implements RegionObserver { + public static class SimpleCP implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void prePut(ObserverContext e, Put m, WALEdit edit, Durability durability) throws IOException { @@ -422,9 +428,14 @@ public class TestVisibilityLabelsReplication { } } - public static class TestCoprocessorForTagsAtSink implements RegionObserver { + public static class TestCoprocessorForTagsAtSink implements RegionCoprocessor, RegionObserver { public static List tags = null; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + @Override public void postGetOp(ObserverContext e, Get get, List results) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java index bb282284b35..1c6920d5b59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertEquals; import java.io.IOException; +import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; @@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -74,10 +76,15 @@ public class TestSnapshotClientRetries { cloneAndAssertOneRetry(snapshotName, TEST_TABLE.getTableName()); } - public static class MasterSyncObserver implements MasterObserver { + public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver { volatile AtomicInteger snapshotCount = null; volatile AtomicInteger cloneCount = null; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void preSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index 58ddc2d69dd..d10e6e7b645 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -25,6 +25,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -58,6 +59,7 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -594,10 +596,15 @@ public class BaseTestHBaseFsck { @org.junit.Rule public TestName name = new TestName(); - public static class MasterSyncObserver implements MasterObserver { + public static class MasterSyncCoprocessor implements MasterCoprocessor, MasterObserver { volatile CountDownLatch tableCreationLatch = null; volatile CountDownLatch tableDeletionLatch = null; + @Override + public Optional getMasterObserver() { + return Optional.of(this); + } + @Override public void postCompletedCreateTableAction( final ObserverContext ctx, @@ -626,16 +633,16 @@ public class BaseTestHBaseFsck { byte [][] splitKeys) throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. 
- MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); - observer.tableCreationLatch = new CountDownLatch(1); + MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class); + coproc.tableCreationLatch = new CountDownLatch(1); if (splitKeys != null) { admin.createTable(htd, splitKeys); } else { admin.createTable(htd); } - observer.tableCreationLatch.await(); - observer.tableCreationLatch = null; + coproc.tableCreationLatch.await(); + coproc.tableCreationLatch = null; testUtil.waitUntilAllRegionsAssigned(htd.getTableName()); } @@ -643,16 +650,16 @@ public class BaseTestHBaseFsck { throws Exception { // NOTE: We need a latch because admin is not sync, // so the postOp coprocessor method may be called after the admin operation returned. - MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() - .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); - observer.tableDeletionLatch = new CountDownLatch(1); + MasterSyncCoprocessor coproc = testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncCoprocessor.class); + coproc.tableDeletionLatch = new CountDownLatch(1); try { admin.disableTable(tableName); } catch (Exception e) { LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); } admin.deleteTable(tableName); - observer.tableDeletionLatch.await(); - observer.tableDeletionLatch = null; + coproc.tableDeletionLatch.await(); + coproc.tableDeletionLatch = null; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index 14c22bf575d..80f12836713 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -27,6 +27,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Optional; import java.util.OptionalInt; import org.apache.hadoop.conf.Configuration; @@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.regionserver.HStore; @@ -213,10 +215,15 @@ public class TestCoprocessorScanPolicy { EnvironmentEdgeManager.reset(); } - public static class ScanObserver implements RegionObserver { + public static class ScanObserver implements RegionCoprocessor, RegionObserver { private Map ttls = new HashMap<>(); private Map versions = new HashMap<>(); + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + // lame way to communicate with the coprocessor, // since it is loaded by a different class loader @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index ca8bc91e950..36612074eba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -50,7 +50,7 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - 
MasterSyncObserver.class.getName()); + MasterSyncCoprocessor.class.getName()); conf.setInt("hbase.regionserver.handler.count", 2); conf.setInt("hbase.regionserver.metahandler.count", 30); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index d2526abc62d..e8148d78162 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -102,7 +102,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSyncObserver.class.getName()); + MasterSyncCoprocessor.class.getName()); conf.setInt("hbase.regionserver.handler.count", 2); conf.setInt("hbase.regionserver.metahandler.count", 30); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java index ea47fcb4ba5..1971049ba86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java @@ -70,7 +70,7 @@ public class TestHBaseFsckReplicas extends BaseTestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSyncObserver.class.getName()); + MasterSyncCoprocessor.class.getName()); conf.setInt("hbase.regionserver.handler.count", 2); conf.setInt("hbase.regionserver.metahandler.count", 30); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java index 
50a5b068632..beef02b505f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java @@ -67,7 +67,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - MasterSyncObserver.class.getName()); + MasterSyncCoprocessor.class.getName()); conf.setInt("hbase.regionserver.handler.count", 2); conf.setInt("hbase.regionserver.metahandler.count", 30); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index 99e983aabd7..f82e8bbfa14 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver; +import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost; @@ -137,7 +137,7 @@ public class TestWALFactory { TEST_UTIL.getConfiguration().setInt("hbase.lease.recovery.timeout", 10000); TEST_UTIL.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000); TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - SampleRegionWALObserver.class.getName()); + SampleRegionWALCoprocessor.class.getName()); TEST_UTIL.startMiniDFSCluster(3); conf = TEST_UTIL.getConfiguration(); @@ -669,7 
+669,7 @@ public class TestWALFactory { public void testWALCoprocessorLoaded() throws Exception { // test to see whether the coprocessor is loaded or not. WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION, null).getCoprocessorHost(); - Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName()); + Coprocessor c = host.findCoprocessor(SampleRegionWALCoprocessor.class); assertNotNull(c); } diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java index b988a5823ed..3eec2a676ed 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/ErrorThrowingGetObserver.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; @@ -38,11 +39,17 @@ import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; import java.util.List; +import java.util.Optional; /** * Simple test coprocessor for injecting exceptions on Get requests. 
*/ -public class ErrorThrowingGetObserver implements RegionObserver { +public class ErrorThrowingGetObserver implements RegionCoprocessor, RegionObserver { + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } + public static final String SHOULD_ERROR_ATTRIBUTE = "error"; @Override diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index d7757ef216d..f6b9579ba9c 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.filter.ParseFilter; @@ -88,6 +89,7 @@ import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.HashMap; +import java.util.Optional; import java.util.concurrent.TimeUnit; import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.getFromThrift; @@ -1552,11 +1554,16 @@ public class TestThriftHBaseServiceHandler { assertTColumnValueEqual(columnValueB, result.getColumnValues().get(1)); } - public static class DelayingRegionObserver implements RegionObserver { + public static class DelayingRegionObserver implements RegionCoprocessor, RegionObserver { private static final Log LOG = LogFactory.getLog(DelayingRegionObserver.class); // sleep time in msec private long delayMillis; + @Override + public Optional getRegionObserver() { + return Optional.of(this); + } 
+ @Override public void start(CoprocessorEnvironment e) throws IOException { this.delayMillis = e.getConfiguration()