HBASE-17732 Coprocessor Design Improvements
------------------------------------------------------
TL;DR
------------------------------------------------------
We are moving from Inheritance
- Observer *is* Coprocessor
- FooService *is* CoprocessorService
To Composition
- Coprocessor *has* Observer
- Coprocessor *has* Service

------------------------------------------------------
Design Changes
------------------------------------------------------
- Adds four new interfaces - MasterCoprocessor, RegionCoprocessor,
  RegionServerCoprocessor, WALCoprocessor.
- These new *Coprocessor interfaces have a get*Observer() function for each
  observer type supported by them.
- Added Coprocessor#getService() to the base interface. All extending
  *Coprocessor interfaces get it from the base interface.
- Added BulkLoadObserver hooks to RegionCoprocessorHost instead of
  SecureBulkLoadManager doing its own trickery.
- CoprocessorHost#find*() functions: too many testing hooks digging into CP
  internals. Deleted where possible, else marked @VisibleForTesting.

------------------------------------------------------
Backward Compatibility
------------------------------------------------------
- Old coprocessors implementing only *Observer won't get loaded (no backward
  compatibility guarantees).
- Third-party coprocessors implementing only Coprocessor will not get loaded
  (just like Observers).
- Old coprocessors implementing CoprocessorService (for the master/region
  hosts) or SingletonCoprocessorService (for the RegionServer host) will
  continue to work with 2.0.
- Added a test to ensure backward compatibility of
  CoprocessorService/SingletonCoprocessorService.
- Note that if a coprocessor implements both observer and service in the same
  class, its service component will continue to work but its observer
  component won't.

------------------------------------------------------
Notes
------------------------------------------------------
Did a side-by-side comparison of CPs in master and after the patch. The
coprocessors which were just CoprocessorService earlier needed a home in some
coprocessor type in the new design. For most it was clear, since they were
using a particular type of environment. Some were tricky:
- JMXListener --> MasterCoprocessor and RegionServerCoprocessor (because a JMX
  listener makes sense for both processes?)
- RSGroupAdminEndpoint --> MasterCoprocessor
- VisibilityController --> MasterCoprocessor and RegionCoprocessor

These were converted to RegionCoprocessor because they were using
RegionCoprocessorEnvironment, which can only come from a RegionCoprocessorHost:
- AggregateImplementation
- BaseRowProcessorEndpoint
- BulkDeleteEndpoint
- Export
- RefreshHFilesEndpoint
- RowCountEndpoint
- MultiRowMutationEndpoint
- SecureBulkLoadEndpoint
- TokenProvider

Change-Id: I813145f2bc11815f52ac703563b879962c249764
parent 0fcc84cadd
commit 0c883a23c5
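Before the diff, here is a minimal sketch of what the composition model looks like to a coprocessor author. The class below is illustrative, not part of the commit, and its hook body is made up:

import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// New style: the coprocessor *has* an observer. The RegionCoprocessorHost
// discovers it through getRegionObserver(); the inherited default returns
// Optional.empty(), i.e. "no observer here".
public class ExampleCompositionCoprocessor implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  // An ordinary RegionObserver hook; it only fires because
  // getRegionObserver() exposed this instance to the host.
  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get,
      List<Cell> results) throws IOException {
    // intercept the Get here
  }
}

An endpoint does the analogous thing with Coprocessor#getService(), returning Optional.of(...) of its protobuf Service, as the converted endpoints in the diff below do. Loading is unchanged: table descriptor attributes or the hbase.coprocessor.*.classes configuration keys.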
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.backup;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.util.Pair;
@@ -43,8 +45,14 @@ import org.apache.yetus.audience.InterfaceAudience;
  * An Observer to facilitate backup operations
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class BackupObserver implements RegionObserver {
+public class BackupObserver implements RegionCoprocessor, RegionObserver {
   private static final Log LOG = LogFactory.getLog(BackupObserver.class);
 
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
   @Override
   public boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
     List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths,
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -67,9 +69,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     POST_DELETE_SNAPSHOT_FAILURE
   }
 
-  public static class MasterSnapshotObserver implements MasterObserver {
-
-
+  public static class MasterSnapshotObserver implements MasterCoprocessor, MasterObserver {
     List<Failure> failures = new ArrayList<Failure>();
 
     public void setFailures(Failure ... f) {
@@ -79,6 +79,11 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
     }
   }
 
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
   @Override
   public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final SnapshotDescription snapshot, final TableDescriptor hTableDescriptor)
@@ -121,8 +126,8 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{
 
 
   private MasterSnapshotObserver getMasterSnapshotObserver() {
-    return (MasterSnapshotObserver)TEST_UTIL.getHBaseCluster().getMaster()
-      .getMasterCoprocessorHost().findCoprocessor(MasterSnapshotObserver.class.getName());
+    return TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost()
+        .findCoprocessor(MasterSnapshotObserver.class);
   }
 
   @Test
@@ -20,7 +20,9 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
+import java.util.Optional;
 
+import com.google.protobuf.Service;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
@@ -53,9 +55,22 @@ public interface Coprocessor {
     STOPPED
   }
 
-  // Interface
+  /**
+   * Called by the {@link CoprocessorEnvironment} during it's own startup to initialize the
+   * coprocessor.
+   */
   default void start(CoprocessorEnvironment env) throws IOException {}
 
+  /**
+   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the
+   * coprocessor.
+   */
   default void stop(CoprocessorEnvironment env) throws IOException {}
+
+  /**
+   * Coprocessor endpoints providing protobuf services should implement this interface.
+   */
+  default Optional<Service> getService() {
+    return Optional.empty();
+  }
 }
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.client.Table;
  * Coprocessor environment state.
  */
 @InterfaceAudience.Private
-public interface CoprocessorEnvironment {
+public interface CoprocessorEnvironment<C extends Coprocessor> {
 
   /** @return the Coprocessor interface version */
   int getVersion();
@@ -39,7 +39,7 @@ public interface CoprocessorEnvironment {
   String getHBaseVersion();
 
   /** @return the loaded coprocessor instance */
-  Coprocessor getInstance();
+  C getInstance();
 
   /** @return the priority assigned to the loaded coprocessor */
   int getPriority();
@@ -67,4 +67,13 @@ public interface CoprocessorEnvironment {
    * @return the classloader for the loaded coprocessor instance
    */
   ClassLoader getClassLoader();
+
+  /**
+   * After a coprocessor has been loaded in an encapsulation of an environment, CoprocessorHost
+   * calls this function to initialize the environment.
+   */
+  void startup() throws IOException;
+
+  /** Clean up the environment. Called by CoprocessorHost when it itself is shutting down. */
+  void shutdown();
 }
@@ -681,8 +681,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
 
   /**
    * Add a table coprocessor to this table. The coprocessor
-   * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
-   * or Endpoint.
+   * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor.
    * It won't check if the class can be loaded or not.
    * Whether a coprocessor is loadable or not will be determined when
    * a region is opened.
@@ -696,8 +695,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
 
   /**
    * Add a table coprocessor to this table. The coprocessor
-   * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
-   * or Endpoint.
+   * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor.
    * It won't check if the class can be loaded or not.
    * Whether a coprocessor is loadable or not will be determined when
    * a region is opened.
@@ -717,8 +715,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
 
   /**
    * Add a table coprocessor to this table. The coprocessor
-   * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
-   * or Endpoint.
+   * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor.
    * It won't check if the class can be loaded or not.
    * Whether a coprocessor is loadable or not will be determined when
    * a region is opened.
@@ -31,11 +31,11 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
@@ -60,8 +60,8 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
  * @param R PB message that is used to transport Promoted (<S>) instance
  */
 @InterfaceAudience.Private
-public class AggregateImplementation<T, S, P extends Message, Q extends Message, R extends Message>
-extends AggregateService implements CoprocessorService, Coprocessor {
+public class AggregateImplementation<T, S, P extends Message, Q extends Message, R extends Message>
+  extends AggregateService implements RegionCoprocessor {
   protected static final Log log = LogFactory.getLog(AggregateImplementation.class);
   private RegionCoprocessorEnvironment env;
 
@@ -156,7 +156,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
         results.clear();
       } while (hasMoreRows);
       if (min != null) {
-        response = AggregateResponse.newBuilder().addFirstPart(
+        response = AggregateResponse.newBuilder().addFirstPart(
           ci.getProtoForCellType(min).toByteString()).build();
       }
     } catch (IOException e) {
@@ -211,7 +211,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
         results.clear();
       } while (hasMoreRows);
       if (sumVal != null) {
-        response = AggregateResponse.newBuilder().addFirstPart(
+        response = AggregateResponse.newBuilder().addFirstPart(
           ci.getProtoForPromotedType(sumVal).toByteString()).build();
       }
     } catch (IOException e) {
@@ -262,7 +262,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
       } while (hasMoreRows);
       ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
       bb.rewind();
-      response = AggregateResponse.newBuilder().addFirstPart(
+      response = AggregateResponse.newBuilder().addFirstPart(
         ByteString.copyFrom(bb)).build();
     } catch (IOException e) {
       CoprocessorRpcUtils.setControllerException(controller, e);
@@ -310,7 +310,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
       }
       List<Cell> results = new ArrayList<>();
       boolean hasMoreRows = false;
-      
+
       do {
         results.clear();
         hasMoreRows = scanner.next(results);
@@ -371,7 +371,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
       List<Cell> results = new ArrayList<>();
 
       boolean hasMoreRows = false;
-      
+
       do {
         tempVal = null;
         hasMoreRows = scanner.next(results);
@@ -413,7 +413,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
    * It is computed for the combination of column
    * family and column qualifier(s) in the given row range as defined in the
    * Scan object. In its current implementation, it takes one column family and
-   * two column qualifiers. The first qualifier is for values column and 
+   * two column qualifiers. The first qualifier is for values column and
    * the second qualifier (optional) is for weight column.
    */
   @Override
@@ -437,7 +437,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
       List<Cell> results = new ArrayList<>();
 
       boolean hasMoreRows = false;
-      
+
       do {
         tempVal = null;
         tempWeight = null;
@@ -461,7 +461,7 @@ extends AggregateService implements CoprocessorService, Coprocessor {
       ByteString first_sumWeights = ci.getProtoForPromotedType(s).toByteString();
       AggregateResponse.Builder pair = AggregateResponse.newBuilder();
       pair.addFirstPart(first_sumVal);
-      pair.addFirstPart(first_sumWeights);
+      pair.addFirstPart(first_sumWeights);
       response = pair.build();
     } catch (IOException e) {
       CoprocessorRpcUtils.setControllerException(controller, e);
@@ -500,8 +500,8 @@ extends AggregateService implements CoprocessorService, Coprocessor {
   }
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   /**
@@ -527,5 +527,5 @@ extends AggregateService implements CoprocessorService, Coprocessor {
   public void stop(CoprocessorEnvironment env) throws IOException {
     // nothing to do
   }
-  
+
 }
@@ -28,6 +28,7 @@ import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,7 +36,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -87,8 +87,7 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
-public class Export extends ExportProtos.ExportService
-    implements Coprocessor, CoprocessorService {
+public class Export extends ExportProtos.ExportService implements RegionCoprocessor {
 
   private static final Log LOG = LogFactory.getLog(Export.class);
   private static final Class<? extends CompressionCodec> DEFAULT_CODEC = DefaultCodec.class;
@@ -312,8 +311,8 @@ public class Export extends ExportProtos.ExportService
   }
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -21,14 +21,13 @@ package org.apache.hadoop.hbase.security.access;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Coprocessor service for bulk loads in secure mode.
@@ -55,8 +55,7 @@ import com.google.protobuf.Service;
  */
 @InterfaceAudience.Private
 @Deprecated
-public class SecureBulkLoadEndpoint extends SecureBulkLoadService
-  implements CoprocessorService, Coprocessor {
+public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements RegionCoprocessor {
 
   public static final long VERSION = 0L;
 
@@ -176,7 +175,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService
   }
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 }
@@ -21,12 +21,12 @@ package org.apache.hadoop.hbase.coprocessor;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.ColumnAggregationService;
@@ -45,13 +45,13 @@ import com.google.protobuf.Service;
  * The aggregation implementation at a region.
  */
 public class ColumnAggregationEndpoint extends ColumnAggregationService
-    implements Coprocessor, CoprocessorService {
+    implements RegionCoprocessor {
   private static final Log LOG = LogFactory.getLog(ColumnAggregationEndpoint.class);
   private RegionCoprocessorEnvironment env = null;
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coprocessor;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
@@ -47,14 +47,12 @@ import com.google.protobuf.Service;
  * response values.
  */
 public class ColumnAggregationEndpointNullResponse
-    extends
-    ColumnAggregationServiceNullResponse
-    implements Coprocessor, CoprocessorService {
+    extends ColumnAggregationServiceNullResponse implements RegionCoprocessor {
   private static final Log LOG = LogFactory.getLog(ColumnAggregationEndpointNullResponse.class);
   private RegionCoprocessorEnvironment env = null;
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coprocessor;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -48,14 +48,14 @@ import com.google.protobuf.Service;
  * coprocessor endpoints throwing exceptions.
  */
 public class ColumnAggregationEndpointWithErrors
-    extends
-    ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors
-    implements Coprocessor, CoprocessorService {
+    extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors
+    implements RegionCoprocessor {
   private static final Log LOG = LogFactory.getLog(ColumnAggregationEndpointWithErrors.class);
   private RegionCoprocessorEnvironment env = null;
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -73,7 +73,7 @@ implements Coprocessor, CoprocessorService {
   }
 
   @Override
-  public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, 
+  public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request,
       RpcCallback<ColumnAggregationWithErrorsSumResponse> done) {
     // aggregate at each region
     Scan scan = new Scan();
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.coprocessor;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -34,6 +33,7 @@ import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos;
 import org.apache.hadoop.hbase.util.Threads;
 
 import java.io.IOException;
+import java.util.Optional;
 
 /**
  * Test implementation of a coprocessor endpoint exposing the
@@ -41,13 +41,12 @@ import java.io.IOException;
  * only.
  */
 public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto
-    implements CoprocessorService, Coprocessor {
-  public ProtobufCoprocessorService() {
-  }
+    implements MasterCoprocessor, RegionCoprocessor {
+  public ProtobufCoprocessorService() {}
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -23,28 +23,21 @@ import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TestAsyncAdminBase;
 import org.apache.hadoop.hbase.coprocessor.TestRegionServerCoprocessorEndpoint.DummyRegionServerEndpoint;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyResponse;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.BeforeClass;
@@ -56,7 +49,6 @@ import org.junit.runners.Parameterized;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 @RunWith(Parameterized.class)
 @Category({ ClientTests.class, MediumTests.class })
@@ -133,14 +125,14 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase {
     }
   }
 
-  static class DummyRegionServerEndpoint extends DummyService implements Coprocessor, SingletonCoprocessorService {
+  public static class DummyRegionServerEndpoint extends DummyService
+      implements RegionServerCoprocessor {
 
-    public DummyRegionServerEndpoint() {
-    }
+    public DummyRegionServerEndpoint() {}
 
     @Override
-    public Service getService() {
-      return this;
+    public Optional<Service> getService() {
+      return Optional.of(this);
     }
 
     @Override
@@ -54,7 +54,12 @@ public class TestClassLoading {
   private static final Log LOG = LogFactory.getLog(TestClassLoading.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
-  public static class TestMasterCoprocessor implements MasterObserver {}
+  public static class TestMasterCoprocessor implements MasterCoprocessor, MasterObserver {
+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+  }
 
   private static MiniDFSCluster cluster;
 
@@ -69,7 +74,7 @@ public class TestClassLoading {
   private static Class<?> regionCoprocessor1 = ColumnAggregationEndpoint.class;
   // TOOD: Fix the import of this handler. It is coming in from a package that is far away.
   private static Class<?> regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class;
-  private static Class<?> regionServerCoprocessor = SampleRegionWALObserver.class;
+  private static Class<?> regionServerCoprocessor = SampleRegionWALCoprocessor.class;
   private static Class<?> masterCoprocessor = TestMasterCoprocessor.class;
 
   private static final String[] regionServerSystemCoprocessors =
@@ -110,8 +115,9 @@ public class TestClassLoading {
   }
 
   static File buildCoprocessorJar(String className) throws Exception {
-    String code = "import org.apache.hadoop.hbase.coprocessor.*;" +
-    "public class " + className + " implements RegionObserver {}";
+    String code =
+        "import org.apache.hadoop.hbase.coprocessor.*;" +
+        "public class " + className + " implements RegionCoprocessor {}";
     return ClassLoaderTestHelper.buildJar(
       TEST_UTIL.getDataTestDir().toString(), className, code);
   }
@@ -539,19 +545,6 @@ public class TestClassLoading {
     assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors);
   }
 
-  @Test
-  public void testFindCoprocessors() {
-    // HBASE 12277:
-    CoprocessorHost masterCpHost =
-      TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessorHost();
-
-    List<MasterObserver> masterObservers = masterCpHost.findCoprocessors(MasterObserver.class);
-
-    assertTrue(masterObservers != null && masterObservers.size() > 0);
-    assertEquals(masterCoprocessor.getSimpleName(),
-      masterObservers.get(0).getClass().getSimpleName());
-  }
-
   private void waitForTable(TableName name) throws InterruptedException, IOException {
     // First wait until all regions are online
     TEST_UTIL.waitTableEnabled(name);
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.*;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests to ensure that 2.0 is backward compatible in loading CoprocessorService.
+ */
+@Category({SmallTests.class})
+public class TestCoprocessorServiceBackwardCompatibility {
+  private static HBaseTestingUtility TEST_UTIL = null;
+  private static Configuration CONF = null;
+
+  public static class DummyCoprocessorService extends DummyService
+      implements CoprocessorService, SingletonCoprocessorService {
+    static int numCalls = 0;
+
+    @Override
+    public Service getService() {
+      return this;
+    }
+
+    @Override
+    public void dummyCall(RpcController controller, DummyRequest request,
+        RpcCallback<DummyResponse> callback) {
+      callback.run(DummyResponse.newBuilder().setValue("").build());
+      numCalls++;
+    }
+
+    @Override
+    public void dummyThrow(RpcController controller, DummyRequest request,
+        RpcCallback<DummyResponse> callback) {
+    }
+  }
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    TEST_UTIL = new HBaseTestingUtility();
+    CONF = TEST_UTIL.getConfiguration();
+    DummyCoprocessorService.numCalls = 0;
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testCoprocessorServiceLoadedByMaster() throws Exception {
+    CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
+        DummyCoprocessorService.class.getName());
+    TEST_UTIL.startMiniCluster();
+
+    TEST_UTIL.getAdmin().coprocessorService().callBlockingMethod(
+        DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null,
+        DummyRequest.getDefaultInstance(), DummyResponse.getDefaultInstance());
+
+    assertEquals(1, DummyCoprocessorService.numCalls);
+  }
+
+  @Test
+  public void testCoprocessorServiceLoadedByRegionServer() throws Exception {
+    CONF.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY,
+        DummyCoprocessorService.class.getName());
+    TEST_UTIL.startMiniCluster();
+    TEST_UTIL.getAdmin().coprocessorService(
+        TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName()).callBlockingMethod(
+        DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null,
+        DummyRequest.getDefaultInstance(), DummyResponse.getDefaultInstance());
+    assertEquals(1, DummyCoprocessorService.numCalls);
+  }
+
+  @Test
+  public void testCoprocessorServiceLoadedByRegion() throws Throwable {
+    CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+        DummyCoprocessorService.class.getName());
+    TEST_UTIL.startMiniCluster();
+    TEST_UTIL.getConnection().getTable(TableName.valueOf("hbase:meta")).batchCoprocessorService(
+        DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"),
+        DummyRequest.getDefaultInstance(), Bytes.toBytes(""), Bytes.toBytes(""),
+        DummyResponse.getDefaultInstance());
+    assertEquals(1, DummyCoprocessorService.numCalls);
+  }
+}
@@ -22,9 +22,9 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
@@ -102,21 +102,12 @@ public class TestRegionServerCoprocessorEndpoint {
     ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim());
   }
 
-  static class DummyRegionServerEndpoint extends DummyService implements Coprocessor, SingletonCoprocessorService {
+  public static class DummyRegionServerEndpoint extends DummyService
+      implements RegionServerCoprocessor {
 
     @Override
-    public Service getService() {
-      return this;
-    }
-
-    @Override
-    public void start(CoprocessorEnvironment env) throws IOException {
-      // TODO Auto-generated method stub
-    }
-
-    @Override
-    public void stop(CoprocessorEnvironment env) throws IOException {
-      // TODO Auto-generated method stub
+    public Optional<Service> getService() {
+      return Optional.of(this);
     }
 
     @Override
@@ -310,7 +310,7 @@ public class TestRowProcessorEndpoint {
    * So they can be loaded with the endpoint on the coprocessor.
    */
   public static class RowProcessorEndpoint<S extends Message,T extends Message>
-      extends BaseRowProcessorEndpoint<S,T> implements CoprocessorService {
+      extends BaseRowProcessorEndpoint<S,T> {
     public static class IncrementCounterProcessor extends
         BaseRowProcessor<IncrementCounterProcessorTestProtos.IncCounterProcessorRequest,
         IncrementCounterProcessorTestProtos.IncCounterProcessorResponse> {
@@ -417,7 +417,7 @@ public class TestRowProcessorEndpoint {
 
     @Override
     public FriendsOfFriendsProcessorResponse getResult() {
-      FriendsOfFriendsProcessorResponse.Builder builder = 
+      FriendsOfFriendsProcessorResponse.Builder builder =
           FriendsOfFriendsProcessorResponse.newBuilder();
       builder.addAllResult(result);
       return builder.build();
@@ -469,7 +469,7 @@ public class TestRowProcessorEndpoint {
     }
 
     @Override
-    public void initialize(FriendsOfFriendsProcessorRequest request) 
+    public void initialize(FriendsOfFriendsProcessorRequest request)
         throws IOException {
       this.person = request.getPerson().toByteArray();
       this.row = request.getRow().toByteArray();
@@ -546,7 +546,7 @@ public class TestRowProcessorEndpoint {
       // Delete from the current row and add to the other row
       Delete d = new Delete(rows[i]);
       KeyValue kvDelete =
-          new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), 
+          new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
               kv.getTimestamp(), KeyValue.Type.Delete);
       d.add(kvDelete);
       Put p = new Put(rows[1 - i]);
@@ -24,10 +24,10 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos;
 import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountRequest;
@@ -75,8 +75,7 @@ public class TestServerCustomProtocol {
   static final String HELLO = "Hello, ";
 
   /* Test protocol implementation */
-  public static class PingHandler extends PingProtos.PingService
-  implements Coprocessor, CoprocessorService {
+  public static class PingHandler extends PingProtos.PingService implements RegionCoprocessor {
     private int counter = 0;
 
     @Override
@@ -125,8 +124,8 @@ public class TestServerCustomProtocol {
     }
 
     @Override
-    public Service getService() {
-      return this;
+    public Optional<Service> getService() {
+      return Optional.of(this);
     }
   }
 
@@ -320,7 +319,7 @@ public class TestServerCustomProtocol {
     // rows from 1 region
     assertEquals(1, results.size());
     verifyRegionResults(locator, results, ROW_A);
-
+
     final String name = "NAME";
     results = hello(table, name, null, ROW_A);
     // Should have gotten results for 1 of the three regions only since we specified
@@ -343,12 +342,12 @@ public class TestServerCustomProtocol {
     // test,,1355943549657.c65d4822d8bdecc033a96451f3a0f55d.
     // test,bbb,1355943549661.110393b070dd1ed93441e0bc9b3ffb7e.
     // test,ccc,1355943549665.c3d6d125141359cbbd2a43eaff3cdf74.
-
+
     Map<byte [], String> results = ping(table, null, ROW_A);
     // Should contain first region only.
     assertEquals(1, results.size());
     verifyRegionResults(locator, results, ROW_A);
-
+
     // Test start row + empty end
     results = ping(table, ROW_BC, null);
     assertEquals(2, results.size());
@@ -358,7 +357,7 @@ public class TestServerCustomProtocol {
       results.get(loc.getRegionInfo().getRegionName()));
     verifyRegionResults(locator, results, ROW_B);
     verifyRegionResults(locator, results, ROW_C);
-
+
     // test empty start + end
     results = ping(table, null, ROW_BC);
     // should contain the first 2 regions
@@ -368,7 +367,7 @@ public class TestServerCustomProtocol {
     loc = locator.getRegionLocation(ROW_C, true);
     assertNull("Should be missing region for row ccc (past stop row)",
       results.get(loc.getRegionInfo().getRegionName()));
-
+
     // test explicit start + end
     results = ping(table, ROW_AB, ROW_BC);
     // should contain first 2 regions
@@ -378,7 +377,7 @@ public class TestServerCustomProtocol {
     loc = locator.getRegionLocation(ROW_C, true);
     assertNull("Should be missing region for row ccc (past stop row)",
      results.get(loc.getRegionInfo().getRegionName()));
-
+
     // test single region
     results = ping(table, ROW_B, ROW_BC);
     // should only contain region bbb
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -28,7 +29,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
 import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
@@ -57,7 +57,7 @@ import com.google.protobuf.Service;
 
 /**
  * Defines a protocol to delete data in bulk based on a scan. The scan can be range scan or with
- * conditions(filters) etc.This can be used to delete rows, column family(s), column qualifier(s) 
+ * conditions(filters) etc.This can be used to delete rows, column family(s), column qualifier(s)
  * or version(s) of columns.When delete type is FAMILY or COLUMN, which all family(s) or column(s)
 * getting deleted will be determined by the Scan. Scan need to select all the families/qualifiers
 * which need to be deleted.When delete type is VERSION, Which column(s) and version(s) to be
@@ -65,16 +65,16 @@ import com.google.protobuf.Service;
 * which needs to be deleted.When a timestamp is passed only one version at that timestamp will be
 * deleted(even if Scan fetches many versions). When timestamp passed as null, all the versions
 * which the Scan selects will get deleted.
- * 
+ *
 * <br> Example: <pre><code>
 * Scan scan = new Scan();
 * // set scan properties(rowkey range, filters, timerange etc).
 * HTable ht = ...;
 * long noOfDeletedRows = 0L;
- * Batch.Call<BulkDeleteService, BulkDeleteResponse> callable = 
+ * Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
 * new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
 * ServerRpcController controller = new ServerRpcController();
- * BlockingRpcCallback<BulkDeleteResponse> rpcCallback = 
+ * BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
 * new BlockingRpcCallback<BulkDeleteResponse>();
 *
 * public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
@@ -95,16 +95,15 @@ import com.google.protobuf.Service;
 * }
 * </code></pre>
 */
-public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService,
-    Coprocessor {
+public class BulkDeleteEndpoint extends BulkDeleteService implements RegionCoprocessor {
   private static final String NO_OF_VERSIONS_TO_DELETE = "noOfVersionsToDelete";
   private static final Log LOG = LogFactory.getLog(BulkDeleteEndpoint.class);
 
   private RegionCoprocessorEnvironment env;
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.coprocessor.example;
 
 import java.io.IOException;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -45,7 +47,11 @@ import org.apache.hadoop.hbase.metrics.Timer;
  * </p>
  * @see ExampleRegionObserverWithMetrics
  */
-public class ExampleMasterObserverWithMetrics implements MasterObserver {
+public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, MasterObserver {
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
 
   private static final Log LOG = LogFactory.getLog(ExampleMasterObserverWithMetrics.class);
 
@@ -133,4 +139,4 @@ public class ExampleMasterObserverWithMetrics implements MasterObserver {
     registry.register("maxMemory", this::getMaxMemory);
   }
 }
-}
+}
@@ -22,12 +22,14 @@ package org.apache.hadoop.hbase.coprocessor.example;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.metrics.Counter;
@@ -45,36 +47,49 @@ import org.apache.hadoop.hbase.metrics.Timer;
  *
  * @see ExampleMasterObserverWithMetrics
  */
-public class ExampleRegionObserverWithMetrics implements RegionObserver {
+public class ExampleRegionObserverWithMetrics implements RegionCoprocessor {
 
   private Counter preGetCounter;
   private Timer costlyOperationTimer;
+  private ExampleRegionObserver observer;
 
-  @Override
-  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> results)
-      throws IOException {
-    // Increment the Counter whenever the coprocessor is called
-    preGetCounter.increment();
-  }
+  class ExampleRegionObserver implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
 
-  @Override
-  public void postGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
-      List<Cell> results) throws IOException {
-    // do a costly (high latency) operation which we want to measure how long it takes by
-    // using a Timer (which is a Meter and a Histogram).
-    long start = System.nanoTime();
-    try {
-      performCostlyOperation();
-    } finally {
-      costlyOperationTimer.updateNanos(System.nanoTime() - start);
+    @Override
+    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
+        List<Cell> results) throws IOException {
+      // Increment the Counter whenever the coprocessor is called
+      preGetCounter.increment();
+    }
+
+    @Override
+    public void postGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
+        List<Cell> results) throws IOException {
+      // do a costly (high latency) operation which we want to measure how long it takes by
+      // using a Timer (which is a Meter and a Histogram).
+      long start = System.nanoTime();
+      try {
+        performCostlyOperation();
+      } finally {
+        costlyOperationTimer.updateNanos(System.nanoTime() - start);
+      }
+    }
+
+    private void performCostlyOperation() {
+      try {
+        // simulate the operation by sleeping.
+        Thread.sleep(ThreadLocalRandom.current().nextLong(100));
+      } catch (InterruptedException ignore) {
+      }
     }
   }
 
-  private void performCostlyOperation() {
-    try {
-      // simulate the operation by sleeping.
-      Thread.sleep(ThreadLocalRandom.current().nextLong(100));
-    } catch (InterruptedException ignore) {}
+  @Override public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(observer);
   }
 
   @Override
@@ -88,6 +103,7 @@ public class ExampleRegionObserverWithMetrics implements RegionObserver {
     // at the region server level per-regionserver.
     MetricRegistry registry =
         ((RegionCoprocessorEnvironment) env).getMetricRegistryForRegionServer();
+    observer = new ExampleRegionObserver();
 
     if (preGetCounter == null) {
      // Create a new Counter, or get the already registered counter.
@@ -23,16 +23,16 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos;
 import org.apache.hadoop.hbase.regionserver.Store;
 
 import java.io.IOException;
+import java.util.Optional;
 
 /**
  * Coprocessor endpoint to refresh HFiles on replica.
@@ -43,7 +43,7 @@ import java.io.IOException;
  * </p>
  */
 public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesService
-  implements Coprocessor, CoprocessorService {
+  implements RegionCoprocessor {
   protected static final Log LOG = LogFactory.getLog(RefreshHFilesEndpoint.class);
   private RegionCoprocessorEnvironment env;
 
@@ -51,8 +51,8 @@ public class RefreshHFilesEndpoint extends RefreshHFilesProtos.RefreshHFilesServ
   }
 
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   @Override
@@ -21,14 +21,14 @@ package org.apache.hadoop.hbase.coprocessor.example;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
@@ -48,8 +48,7 @@ import com.google.protobuf.Service;
  * hbase-examples/src/main/protobuf/Examples.proto.
  * </p>
 */
-public class RowCountEndpoint extends ExampleProtos.RowCountService
-    implements Coprocessor, CoprocessorService {
+public class RowCountEndpoint extends ExampleProtos.RowCountService implements RegionCoprocessor {
   private RegionCoprocessorEnvironment env;
 
   public RowCountEndpoint() {
@@ -59,8 +58,8 @@ public class RowCountEndpoint extends ExampleProtos.RowCountService
    * Just returns a reference to this object, which implements the RowCounterService interface.
    */
   @Override
-  public Service getService() {
-    return this;
+  public Optional<Service> getService() {
+    return Optional.of(this);
   }
 
   /**
@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.coprocessor.example;
import java.io.IOException;
import java.util.List;
import java.util.NavigableSet;
+import java.util.Optional;
import java.util.OptionalInt;

import org.apache.commons.logging.Log;

@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.HStore;

@ -51,20 +53,25 @@ import org.apache.zookeeper.ZooKeeper;
 * This is an example showing how a RegionObserver could be configured
 * via ZooKeeper in order to control a Region compaction, flush, and scan policy.
 *
 * This also demonstrates the use of shared
 * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state.
 * See {@link RegionCoprocessorEnvironment#getSharedData()}.
 *
 * This would be useful for an incremental backup tool, which would indicate the last
 * time of a successful backup via ZK and instruct HBase to not delete data that was
 * inserted since (based on wall clock time).
 *
 * This implements org.apache.zookeeper.Watcher directly instead of using
 * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher},
 * because RegionObservers come and go and currently
 * listeners registered with ZooKeeperWatcher cannot be removed.
 */
-public class ZooKeeperScanPolicyObserver implements RegionObserver {
+public class ZooKeeperScanPolicyObserver implements RegionCoprocessor, RegionObserver {
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
  // The zk ensemble info is put in hbase config xml with given custom key.
  public static final String ZK_ENSEMBLE_KEY = "ZooKeeperScanPolicyObserver.zookeeper.ensemble";
  public static final String ZK_SESSION_TIMEOUT_KEY =

@ -243,4 +250,4 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
    return new StoreScanner((HStore) store, scanInfo, scan, targetCols,
        ((HStore) store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
  }
}
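The observer half of the contract mirrors the service half: a hook-bearing coprocessor implements both RegionCoprocessor and RegionObserver and returns itself from getRegionObserver(), exactly as ZooKeeperScanPolicyObserver does above. A stripped-down sketch (the class name and the logged prePut body are illustrative):

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.wal.WALEdit;

// Sketch of the composition pattern: one object is both the Coprocessor the
// host loads and the RegionObserver holding the hooks, wired together
// explicitly through getRegionObserver() rather than through inheritance.
public class AuditingObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx, Put put,
      WALEdit edit, Durability durability) throws IOException {
    // Illustrative hook body: observe every Put before it is applied.
    System.out.println("prePut observed: " + put);
  }
}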
@ -27,6 +27,7 @@ import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

@ -155,12 +157,18 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
  private boolean load = false;
  private boolean check = false;

- public static class SlowMeCoproScanOperations implements RegionObserver {
+ public static class SlowMeCoproScanOperations implements RegionCoprocessor, RegionObserver {
    static final AtomicLong sleepTime = new AtomicLong(2000);
    Random r = new Random();
    AtomicLong countOfNext = new AtomicLong(0);
    AtomicLong countOfOpen = new AtomicLong(0);
    public SlowMeCoproScanOperations() {}

+   @Override
+   public Optional<RegionObserver> getRegionObserver() {
+     return Optional.of(this);
+   }
+
    @Override
    public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
        final Scan scan, final RegionScanner s) throws IOException {
@ -24,6 +24,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.Optional;
import java.util.UUID;

import org.apache.commons.logging.Log;

@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;

@ -247,7 +249,13 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
    assertTrue(verified);
  }

- public static class OperationAttributesTestController implements RegionObserver {
+ public static class OperationAttributesTestController
+     implements RegionCoprocessor, RegionObserver {
+
+   @Override
+   public Optional<RegionObserver> getRegionObserver() {
+     return Optional.of(this);
+   }

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
@ -23,6 +23,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import java.util.Optional;
import java.util.UUID;

import org.apache.commons.logging.Log;

@ -34,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;

@ -156,7 +158,12 @@ public class TestImportTSVWithTTLs implements Configurable {
    return tool;
  }

- public static class TTLCheckingObserver implements RegionObserver {
+ public static class TTLCheckingObserver implements RegionCoprocessor, RegionObserver {
+
+   @Override
+   public Optional<RegionObserver> getRegionObserver() {
+     return Optional.of(this);
+   }

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit,
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup;

import java.io.IOException;
import java.util.HashSet;
+import java.util.Optional;
import java.util.Set;

import com.google.protobuf.RpcCallback;

@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

@ -70,8 +71,9 @@ import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import org.apache.yetus.audience.InterfaceAudience;

+// TODO: Encapsulate MasterObserver functions into separate subclass.
@InterfaceAudience.Private
-public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService {
+public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
  private static final Log LOG = LogFactory.getLog(RSGroupAdminEndpoint.class);

  private MasterServices master = null;

@ -94,8 +96,13 @@ public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService
  }

  @Override
- public Service getService() {
-   return groupAdminService;
+ public Optional<Service> getService() {
+   return Optional.of(groupAdminService);
  }

+ @Override
+ public Optional<MasterObserver> getMasterObserver() {
+   return Optional.of(this);
+ }
+
  RSGroupInfoManager getGroupInfoManager() {

@ -107,12 +114,6 @@ public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService
   * This class calls {@link RSGroupAdminServer} for actual work, converts result to protocol
   * buffer response, handles exceptions if any occurred and then calls the {@code RpcCallback} with
   * the response.
-  * Since our CoprocessorHost asks the Coprocessor for a Service
-  * ({@link CoprocessorService#getService()}) instead of doing "coproc instanceOf Service"
-  * and requiring Coprocessor itself to be Service (something we do with our observers),
-  * we can use composition instead of inheritance here. That makes it easy to manage
-  * functionalities in concise classes (sometimes inner classes) instead of single class doing
-  * many different things.
   */
  private class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
    @Override
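RSGroupAdminEndpoint is the clearest illustration of the new shape: a single coprocessor exposing a Service and a MasterObserver through separate getters, so the host never needs instanceof checks. A hedged sketch of that dual exposure (names are illustrative; the real endpoint returns its inner RSGroupAdminServiceImpl from getService()):

import java.io.IOException;
import java.util.Optional;

import com.google.protobuf.Service;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Sketch: one coprocessor, two faces. Each capability is advertised through
// its own getter; unused getters keep their Optional.empty() defaults.
public class AdminEndpointSketch implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<Service> getService() {
    // A real endpoint returns its generated protobuf service here, e.g.
    // Optional.of(groupAdminService) in RSGroupAdminEndpoint.
    return Optional.empty();
  }

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // Observer face: react to table deletion (illustrative hook body).
  }
}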
@ -97,8 +97,8 @@ public class TestRSGroups extends TestRSGroupsBase {
    admin.setBalancerRunning(false,true);
    rsGroupAdmin = new VerifyingRSGroupAdminClient(
        new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration());
-   rsGroupAdminEndpoint =
-       master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0);
+   rsGroupAdminEndpoint = (RSGroupAdminEndpoint)
+       master.getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName());
  }

  @AfterClass
@ -161,7 +161,7 @@ public class TestRSGroupsOfflineMode {

    // Get groupInfoManager from the new active master.
    RSGroupInfoManager groupMgr = ((MiniHBaseCluster)cluster).getMaster().getMasterCoprocessorHost()
-       .findCoprocessors(RSGroupAdminEndpoint.class).get(0).getGroupInfoManager();
+       .findCoprocessor(RSGroupAdminEndpoint.class).getGroupInfoManager();
    // Make sure balancer is in offline mode, since this is what we're testing.
    assertFalse(groupMgr.isOnline());
    // Verify the group affiliation that's loaded from ZK instead of tables.
@ -18,6 +18,7 @@
 */
package org.apache.hadoop.hbase;

+import com.google.protobuf.Service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

@ -32,6 +33,7 @@ import java.rmi.server.RMIClientSocketFactory;
import java.rmi.server.RMIServerSocketFactory;
import java.rmi.server.UnicastRemoteObject;
import java.util.HashMap;
+import java.util.Optional;

import javax.management.MBeanServer;
import javax.management.remote.JMXConnectorServer;

@ -46,8 +48,7 @@ import javax.management.remote.rmi.RMIConnectorServer;
 * 2)support password authentication
 * 3)support subset of SSL (with default configuration)
 */
-public class JMXListener implements Coprocessor {
-
+public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor {
  private static final Log LOG = LogFactory.getLog(JMXListener.class);
  public static final String RMI_REGISTRY_PORT_CONF_KEY = ".rmi.registry.port";
  public static final String RMI_CONNECTOR_PORT_CONF_KEY = ".rmi.connector.port";
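Loading one class on two host types is purely a configuration matter: list JMXListener under both system-coprocessor keys. A configuration fragment, assuming the standard key names (not a complete program):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Fragment: register JMXListener with both the master and regionserver hosts.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.coprocessor.master.classes",
    "org.apache.hadoop.hbase.JMXListener");
conf.set("hbase.coprocessor.regionserver.classes",
    "org.apache.hadoop.hbase.JMXListener");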
@ -38,7 +38,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost.Environment;
+import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.io.MultipleIOException;

@ -70,7 +70,7 @@ public final class HTableWrapper implements Table {
   * @throws IOException
   */
  public static Table createWrapper(List<Table> openTables,
-     TableName tableName, Environment env, ExecutorService pool) throws IOException {
+     TableName tableName, BaseEnvironment env, ExecutorService pool) throws IOException {
    return new HTableWrapper(openTables, tableName,
        CoprocessorHConnection.getConnectionForEnvironment(env), pool);
  }
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.constraint;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Optional;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

@ -42,7 +44,7 @@ import org.apache.hadoop.hbase.wal.WALEdit;
 * implemented on any given system by a coprocessor.
 */
@InterfaceAudience.Private
-public class ConstraintProcessor implements RegionObserver {
+public class ConstraintProcessor implements RegionCoprocessor, RegionObserver {

  private static final Log LOG = LogFactory.getLog(ConstraintProcessor.class);

@ -50,6 +52,11 @@ public class ConstraintProcessor implements RegionObserver {

  private List<? extends Constraint> constraints = new ArrayList<>();

+ @Override
+ public Optional<RegionObserver> getRegionObserver() {
+   return Optional.of(this);
+ }
+
  /**
   * Create the constraint processor.
   * <p>
@ -0,0 +1,187 @@
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableWrapper;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.yetus.audience.InterfaceAudience;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;

/**
 * Encapsulation of the environment of each coprocessor
 */
@InterfaceAudience.Private
public class BaseEnvironment<C extends Coprocessor> implements CoprocessorEnvironment<C> {
  private static final Log LOG = LogFactory.getLog(BaseEnvironment.class);

  /** The coprocessor */
  public C impl;
  /** Chaining priority */
  protected int priority = Coprocessor.PRIORITY_USER;
  /** Current coprocessor state */
  Coprocessor.State state = Coprocessor.State.UNINSTALLED;
  /** Accounting for tables opened by the coprocessor */
  protected List<Table> openTables =
      Collections.synchronizedList(new ArrayList<Table>());
  private int seq;
  private Configuration conf;
  private ClassLoader classLoader;

  /**
   * Constructor
   * @param impl the coprocessor instance
   * @param priority chaining priority
   */
  public BaseEnvironment(final C impl, final int priority,
      final int seq, final Configuration conf) {
    this.impl = impl;
    this.classLoader = impl.getClass().getClassLoader();
    this.priority = priority;
    this.state = Coprocessor.State.INSTALLED;
    this.seq = seq;
    this.conf = conf;
  }

  /** Initialize the environment */
  @Override
  public void startup() throws IOException {
    if (state == Coprocessor.State.INSTALLED ||
        state == Coprocessor.State.STOPPED) {
      state = Coprocessor.State.STARTING;
      Thread currentThread = Thread.currentThread();
      ClassLoader hostClassLoader = currentThread.getContextClassLoader();
      try {
        currentThread.setContextClassLoader(this.getClassLoader());
        impl.start(this);
        state = Coprocessor.State.ACTIVE;
      } finally {
        currentThread.setContextClassLoader(hostClassLoader);
      }
    } else {
      LOG.warn("Not starting coprocessor " + impl.getClass().getName() +
          " because not inactive (state=" + state.toString() + ")");
    }
  }

  /** Clean up the environment */
  @Override
  public void shutdown() {
    if (state == Coprocessor.State.ACTIVE) {
      state = Coprocessor.State.STOPPING;
      Thread currentThread = Thread.currentThread();
      ClassLoader hostClassLoader = currentThread.getContextClassLoader();
      try {
        currentThread.setContextClassLoader(this.getClassLoader());
        impl.stop(this);
        state = Coprocessor.State.STOPPED;
      } catch (IOException ioe) {
        LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe);
      } finally {
        currentThread.setContextClassLoader(hostClassLoader);
      }
    } else {
      LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+
          " because not active (state="+state.toString()+")");
    }
    synchronized (openTables) {
      // clean up any table references
      for (Table table: openTables) {
        try {
          ((HTableWrapper)table).internalClose();
        } catch (IOException e) {
          // nothing can be done here
          LOG.warn("Failed to close " +
              table.getName(), e);
        }
      }
    }
  }

  @Override
  public C getInstance() {
    return impl;
  }

  @Override
  public ClassLoader getClassLoader() {
    return classLoader;
  }

  @Override
  public int getPriority() {
    return priority;
  }

  @Override
  public int getLoadSequence() {
    return seq;
  }

  /** @return the coprocessor environment version */
  @Override
  public int getVersion() {
    return Coprocessor.VERSION;
  }

  /** @return the HBase release */
  @Override
  public String getHBaseVersion() {
    return VersionInfo.getVersion();
  }

  @Override
  public Configuration getConfiguration() {
    return conf;
  }

  /**
   * Open a table from within the Coprocessor environment
   * @param tableName the table name
   * @return an interface for manipulating the table
   * @exception IOException Exception
   */
  @Override
  public Table getTable(TableName tableName) throws IOException {
    return this.getTable(tableName, null);
  }

  /**
   * Open a table from within the Coprocessor environment
   * @param tableName the table name
   * @return an interface for manipulating the table
   * @exception IOException Exception
   */
  @Override
  public Table getTable(TableName tableName, ExecutorService pool) throws IOException {
    return HTableWrapper.createWrapper(openTables, tableName, this, pool);
  }
}
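The startup()/shutdown() bracket above is the usual context-classloader swap: coprocessor lifecycle hooks run under the coprocessor's own classloader so its bundled dependencies resolve, and the host's loader is restored even on failure. The idiom in isolation, plain Java with no HBase types:

// Run a task under a different context classloader, always restoring the
// previous one -- the same try/finally shape as BaseEnvironment#startup().
public final class ClassLoaderScope {
  private ClassLoaderScope() {}

  public static void runWith(ClassLoader cl, Runnable task) {
    Thread current = Thread.currentThread();
    ClassLoader previous = current.getContextClassLoader();
    try {
      current.setContextClassLoader(cl);
      task.run();
    } finally {
      current.setContextClassLoader(previous);  // restore the host loader
    }
  }
}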
@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.coprocessor;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
+import java.util.Optional;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;

@ -46,19 +46,19 @@ import com.google.protobuf.Service;
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
-public abstract class BaseRowProcessorEndpoint<S extends Message, T extends Message>
-extends RowProcessorService implements CoprocessorService, Coprocessor {
+public abstract class BaseRowProcessorEndpoint<S extends Message, T extends Message>
+extends RowProcessorService implements RegionCoprocessor {
  private RegionCoprocessorEnvironment env;
  /**
   * Pass a processor to region to process multiple rows atomically.
   *
   * The RowProcessor implementations should be the inner classes of your
   * RowProcessorEndpoint. This way the RowProcessor can be class-loaded with
   * the Coprocessor endpoint together.
   *
   * See {@code TestRowProcessorEndpoint} for example.
   *
   * The request contains information for constructing processor
   * (see {@link #constructRowProcessorFromRequest}. The processor object defines
   * the read-modify-write procedure.
   */

@ -83,8 +83,8 @@ extends RowProcessorService implements CoprocessorService, Coprocessor {
  }

  @Override
- public Service getService() {
-   return this;
+ public Optional<Service> getService() {
+   return Optional.of(this);
  }

  /**
@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBul
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
-public interface BulkLoadObserver extends Coprocessor {
+public interface BulkLoadObserver {
  /**
   * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call.
   * It can't bypass the default action, e.g., ctx.bypass() won't have effect.
@ -25,12 +25,13 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@ -44,22 +45,22 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableWrapper;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
import org.apache.hadoop.hbase.util.SortedList;
import org.apache.hadoop.hbase.util.VersionInfo;

/**
 * Provides the common setup framework and runtime services for coprocessor
 * invocation from HBase services.
- * @param <E> the specific environment extension that a concrete implementation
- * provides
+ * @param <C> type of specific coprocessor this host will handle
+ * @param <E> type of specific coprocessor environment this host requires.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
-public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
+public abstract class CoprocessorHost<C extends Coprocessor, E extends CoprocessorEnvironment<C>> {
  public static final String REGION_COPROCESSOR_CONF_KEY =
      "hbase.coprocessor.region.classes";
  public static final String REGIONSERVER_COPROCESSOR_CONF_KEY =
@ -81,7 +82,8 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
  private static final Log LOG = LogFactory.getLog(CoprocessorHost.class);
  protected Abortable abortable;
  /** Ordered set of loaded coprocessors with lock */
- protected SortedList<E> coprocessors = new SortedList<>(new EnvironmentPriorityComparator());
+ protected final SortedList<E> coprocEnvironments =
+     new SortedList<>(new EnvironmentPriorityComparator());
  protected Configuration conf;
  // unique file prefix to use for local copies of jars when classloading
  protected String pathPrefix;

@ -96,7 +98,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
   * Not to be confused with the per-object _coprocessors_ (above),
   * coprocessorNames is static and stores the set of all coprocessors ever
   * loaded by any thread in this JVM. It is strictly additive: coprocessors are
-  * added to coprocessorNames, by loadInstance() but are never removed, since
+  * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since
   * the intention is to preserve a history of all loaded coprocessors for
   * diagnosis in case of server crash (HBASE-4014).
   */
@ -118,7 +120,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
   */
  public Set<String> getCoprocessors() {
    Set<String> returnValue = new TreeSet<>();
-   for (CoprocessorEnvironment e: coprocessors) {
+   for (E e: coprocEnvironments) {
      returnValue.add(e.getInstance().getClass().getSimpleName());
    }
    return returnValue;

@ -135,7 +137,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
      return;
    }

-   Class<?> implClass = null;
+   Class<?> implClass;

    // load default coprocessors from configure file
    String[] defaultCPClasses = conf.getStrings(confKey);

@ -156,10 +158,13 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
        implClass = cl.loadClass(className);
        // Add coprocessors as we go to guard against case where a coprocessor is specified twice
        // in the configuration
-       this.coprocessors.add(loadInstance(implClass, priority, conf));
-       LOG.info("System coprocessor " + className + " was loaded " +
-           "successfully with priority (" + priority + ").");
-       ++priority;
+       E env = checkAndLoadInstance(implClass, priority, conf);
+       if (env != null) {
+         this.coprocEnvironments.add(env);
+         LOG.info(
+             "System coprocessor " + className + " was loaded " + "successfully with priority (" + priority + ").");
+         ++priority;
+       }
      } catch (Throwable t) {
        // We always abort if system coprocessors cannot be loaded
        abortServer(className, t);
@ -196,7 +201,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
   */
  public E load(Path path, String className, int priority,
      Configuration conf, String[] includedClassPrefixes) throws IOException {
-   Class<?> implClass = null;
+   Class<?> implClass;
    LOG.debug("Loading coprocessor class " + className + " with path " +
        path + " and priority " + priority);

@ -223,7 +228,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
    try{
      // switch temporarily to the thread classloader for custom CP
      currentThread.setContextClassLoader(cl);
-     E cpInstance = loadInstance(implClass, priority, conf);
+     E cpInstance = checkAndLoadInstance(implClass, priority, conf);
      return cpInstance;
    } finally {
      // restore the fresh (host) classloader

@ -231,16 +236,11 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
    }
  }

- /**
-  * @param implClass Implementation class
-  * @param priority priority
-  * @param conf configuration
-  * @throws java.io.IOException Exception
-  */
- public void load(Class<?> implClass, int priority, Configuration conf)
+ @VisibleForTesting
+ public void load(Class<? extends C> implClass, int priority, Configuration conf)
      throws IOException {
-   E env = loadInstance(implClass, priority, conf);
-   coprocessors.add(env);
+   E env = checkAndLoadInstance(implClass, priority, conf);
+   coprocEnvironments.add(env);
  }

  /**
@ -249,29 +249,22 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
   * @param conf configuration
   * @throws java.io.IOException Exception
   */
- public E loadInstance(Class<?> implClass, int priority, Configuration conf)
+ public E checkAndLoadInstance(Class<?> implClass, int priority, Configuration conf)
      throws IOException {
-   if (!Coprocessor.class.isAssignableFrom(implClass)) {
-     throw new IOException("Configured class " + implClass.getName() + " must implement "
-         + Coprocessor.class.getName() + " interface ");
-   }
-
    // create the instance
-   Coprocessor impl;
-   Object o = null;
+   C impl;
    try {
-     o = implClass.newInstance();
-     impl = (Coprocessor)o;
-   } catch (InstantiationException e) {
-     throw new IOException(e);
-   } catch (IllegalAccessException e) {
+     impl = checkAndGetInstance(implClass);
+     if (impl == null) {
+       LOG.error("Cannot load coprocessor " + implClass.getSimpleName());
+       return null;
+     }
+   } catch (InstantiationException|IllegalAccessException e) {
      throw new IOException(e);
    }
    // create the environment
-   E env = createEnvironment(implClass, impl, priority, loadSequence.incrementAndGet(), conf);
-   if (env instanceof Environment) {
-     ((Environment)env).startup();
-   }
+   E env = createEnvironment(impl, priority, loadSequence.incrementAndGet(), conf);
+   env.startup();
    // HBASE-4014: maintain list of loaded coprocessors for later crash analysis
    // if server (master or regionserver) aborts.
    coprocessorNames.add(implClass.getName());
@ -281,28 +274,30 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
  /**
   * Called when a new Coprocessor class is loaded
   */
- public abstract E createEnvironment(Class<?> implClass, Coprocessor instance,
-     int priority, int sequence, Configuration conf);
+ public abstract E createEnvironment(C instance, int priority, int sequence, Configuration conf);

- public void shutdown(CoprocessorEnvironment e) {
-   if (e instanceof Environment) {
-     if (LOG.isDebugEnabled()) {
-       LOG.debug("Stop coprocessor " + e.getInstance().getClass().getName());
-     }
-     ((Environment)e).shutdown();
-   } else {
-     LOG.warn("Shutdown called on unknown environment: "+
-         e.getClass().getName());
+ /**
+  * Called when a new Coprocessor class needs to be loaded. Checks if type of the given class
+  * is what the corresponding host implementation expects. If it is of correct type, returns an
+  * instance of the coprocessor to be loaded. If not, returns null.
+  * If an exception occurs when trying to create instance of a coprocessor, it's passed up and
+  * eventually results into server aborting.
+  */
+ public abstract C checkAndGetInstance(Class<?> implClass)
+     throws InstantiationException, IllegalAccessException;
+
+ public void shutdown(E e) {
+   if (LOG.isDebugEnabled()) {
+     LOG.debug("Stop coprocessor " + e.getInstance().getClass().getName());
    }
+   e.shutdown();
  }

  /**
-  * Find a coprocessor implementation by class name
-  * @param className the class name
-  * @return the coprocessor, or null if not found
+  * Find coprocessors by full class name or simple name.
   */
- public Coprocessor findCoprocessor(String className) {
-   for (E env: coprocessors) {
+ public C findCoprocessor(String className) {
+   for (E env: coprocEnvironments) {
      if (env.getInstance().getClass().getName().equals(className) ||
          env.getInstance().getClass().getSimpleName().equals(className)) {
        return env.getInstance();
@ -311,16 +306,26 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
    return null;
  }

+ @VisibleForTesting
+ public <T extends C> T findCoprocessor(Class<T> cls) {
+   for (E env: coprocEnvironments) {
+     if (cls.isAssignableFrom(env.getInstance().getClass())) {
+       return (T) env.getInstance();
+     }
+   }
+   return null;
+ }
+
  /**
   * Find list of coprocessors that extend/implement the given class/interface
   * @param cls the class/interface to look for
   * @return the list of coprocessors, or null if not found
   */
- public <T extends Coprocessor> List<T> findCoprocessors(Class<T> cls) {
+ public <T extends C> List<T> findCoprocessors(Class<T> cls) {
    ArrayList<T> ret = new ArrayList<>();

-   for (E env: coprocessors) {
-     Coprocessor cp = env.getInstance();
+   for (E env: coprocEnvironments) {
+     C cp = env.getInstance();

      if(cp != null) {
        if (cls.isAssignableFrom(cp.getClass())) {

@ -331,33 +336,14 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
    return ret;
  }

- /**
-  * Find list of CoprocessorEnvironment that extend/implement the given class/interface
-  * @param cls the class/interface to look for
-  * @return the list of CoprocessorEnvironment, or null if not found
-  */
- public List<CoprocessorEnvironment> findCoprocessorEnvironment(Class<?> cls) {
-   ArrayList<CoprocessorEnvironment> ret = new ArrayList<>();
-
-   for (E env: coprocessors) {
-     Coprocessor cp = env.getInstance();
-
-     if(cp != null) {
-       if (cls.isAssignableFrom(cp.getClass())) {
-         ret.add(env);
-       }
-     }
-   }
-   return ret;
- }
-
- /**
-  * Find a coprocessor environment by class name
-  * @param className the class name
-  * @return the coprocessor, or null if not found
-  */
- public CoprocessorEnvironment findCoprocessorEnvironment(String className) {
-   for (E env: coprocessors) {
+ @VisibleForTesting
+ public E findCoprocessorEnvironment(String className) {
+   for (E env: coprocEnvironments) {
      if (env.getInstance().getClass().getName().equals(className) ||
          env.getInstance().getClass().getSimpleName().equals(className)) {
        return env;
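The typed findCoprocessor(Class) above is a test hook; its core is just a typed scan over loaded instances. The lookup idiom in plain Java (a toy stand-in, not the HBase class):

import java.util.List;

// Toy model of the typed lookup: return the first loaded instance assignable
// to the requested type, or null on miss (mirroring findCoprocessor).
final class TypedLookup {
  static <T> T findFirst(List<Object> instances, Class<T> cls) {
    for (Object o : instances) {
      if (cls.isAssignableFrom(o.getClass())) {
        return cls.cast(o);  // safe: assignability was just checked
      }
    }
    return null;
  }
}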
@ -374,7 +360,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
  Set<ClassLoader> getExternalClassLoaders() {
    Set<ClassLoader> externalClassLoaders = new HashSet<>();
    final ClassLoader systemClassLoader = this.getClass().getClassLoader();
-   for (E env : coprocessors) {
+   for (E env : coprocEnvironments) {
      ClassLoader cl = env.getInstance().getClass().getClassLoader();
      if (cl != systemClassLoader){
        //do not include system classloader

@ -388,8 +374,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
   * Environment priority comparator.
   * Coprocessors are chained in sorted order.
   */
- static class EnvironmentPriorityComparator
-     implements Comparator<CoprocessorEnvironment> {
+ static class EnvironmentPriorityComparator implements Comparator<CoprocessorEnvironment> {
    @Override
    public int compare(final CoprocessorEnvironment env1,
        final CoprocessorEnvironment env2) {
@ -407,153 +392,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
    }
  }

- /**
-  * Encapsulation of the environment of each coprocessor
-  */
- public static class Environment implements CoprocessorEnvironment {
-
-   /** The coprocessor */
-   public Coprocessor impl;
-   /** Chaining priority */
-   protected int priority = Coprocessor.PRIORITY_USER;
-   /** Current coprocessor state */
-   Coprocessor.State state = Coprocessor.State.UNINSTALLED;
-   /** Accounting for tables opened by the coprocessor */
-   protected List<Table> openTables =
-       Collections.synchronizedList(new ArrayList<Table>());
-   private int seq;
-   private Configuration conf;
-   private ClassLoader classLoader;
-
-   /**
-    * Constructor
-    * @param impl the coprocessor instance
-    * @param priority chaining priority
-    */
-   public Environment(final Coprocessor impl, final int priority,
-       final int seq, final Configuration conf) {
-     this.impl = impl;
-     this.classLoader = impl.getClass().getClassLoader();
-     this.priority = priority;
-     this.state = Coprocessor.State.INSTALLED;
-     this.seq = seq;
-     this.conf = conf;
-   }
-
-   /** Initialize the environment */
-   public void startup() throws IOException {
-     if (state == Coprocessor.State.INSTALLED ||
-         state == Coprocessor.State.STOPPED) {
-       state = Coprocessor.State.STARTING;
-       Thread currentThread = Thread.currentThread();
-       ClassLoader hostClassLoader = currentThread.getContextClassLoader();
-       try {
-         currentThread.setContextClassLoader(this.getClassLoader());
-         impl.start(this);
-         state = Coprocessor.State.ACTIVE;
-       } finally {
-         currentThread.setContextClassLoader(hostClassLoader);
-       }
-     } else {
-       LOG.warn("Not starting coprocessor "+impl.getClass().getName()+
-           " because not inactive (state="+state.toString()+")");
-     }
-   }
-
-   /** Clean up the environment */
-   protected void shutdown() {
-     if (state == Coprocessor.State.ACTIVE) {
-       state = Coprocessor.State.STOPPING;
-       Thread currentThread = Thread.currentThread();
-       ClassLoader hostClassLoader = currentThread.getContextClassLoader();
-       try {
-         currentThread.setContextClassLoader(this.getClassLoader());
-         impl.stop(this);
-         state = Coprocessor.State.STOPPED;
-       } catch (IOException ioe) {
-         LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe);
-       } finally {
-         currentThread.setContextClassLoader(hostClassLoader);
-       }
-     } else {
-       LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+
-           " because not active (state="+state.toString()+")");
-     }
-     synchronized (openTables) {
-       // clean up any table references
-       for (Table table: openTables) {
-         try {
-           ((HTableWrapper)table).internalClose();
-         } catch (IOException e) {
-           // nothing can be done here
-           LOG.warn("Failed to close " +
-               table.getName(), e);
-         }
-       }
-     }
-   }
-
-   @Override
-   public Coprocessor getInstance() {
-     return impl;
-   }
-
-   @Override
-   public ClassLoader getClassLoader() {
-     return classLoader;
-   }
-
-   @Override
-   public int getPriority() {
-     return priority;
-   }
-
-   @Override
-   public int getLoadSequence() {
-     return seq;
-   }
-
-   /** @return the coprocessor environment version */
-   @Override
-   public int getVersion() {
-     return Coprocessor.VERSION;
-   }
-
-   /** @return the HBase release */
-   @Override
-   public String getHBaseVersion() {
-     return VersionInfo.getVersion();
-   }
-
-   @Override
-   public Configuration getConfiguration() {
-     return conf;
-   }
-
-   /**
-    * Open a table from within the Coprocessor environment
-    * @param tableName the table name
-    * @return an interface for manipulating the table
-    * @exception java.io.IOException Exception
-    */
-   @Override
-   public Table getTable(TableName tableName) throws IOException {
-     return this.getTable(tableName, null);
-   }
-
-   /**
-    * Open a table from within the Coprocessor environment
-    * @param tableName the table name
-    * @return an interface for manipulating the table
-    * @exception java.io.IOException Exception
-    */
-   @Override
-   public Table getTable(TableName tableName, ExecutorService pool) throws IOException {
-     return HTableWrapper.createWrapper(openTables, tableName, this, pool);
-   }
- }
-
- protected void abortServer(final CoprocessorEnvironment environment, final Throwable e) {
+ protected void abortServer(final E environment, final Throwable e) {
    abortServer(environment.getInstance().getClass().getName(), e);
  }
@ -586,8 +425,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
  // etc) mention this nuance of our exception handling so that coprocessor can throw appropriate
  // exceptions depending on situation. If any changes are made to this logic, make sure to
  // update all classes' comments.
- protected void handleCoprocessorThrowable(final CoprocessorEnvironment env, final Throwable e)
-     throws IOException {
+ protected void handleCoprocessorThrowable(final E env, final Throwable e) throws IOException {
    if (e instanceof IOException) {
      throw (IOException)e;
    }

@ -610,7 +448,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
        "environment",e);
    }

-   coprocessors.remove(env);
+   coprocEnvironments.remove(env);
    try {
      shutdown(env);
    } catch (Exception x) {
@ -695,4 +533,192 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
      "'. Details of the problem: " + message);
    }
  }

  /**
   * Implementations defined function to get an observer of type {@code O} from a coprocessor of
   * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each
   * observer they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for
   * each of RegionObserver, EndpointObserver and BulkLoadObserver.
   * These getters are used by {@code ObserverOperation} to get appropriate observer from the
   * coprocessor.
   */
  @FunctionalInterface
  public interface ObserverGetter<C, O> extends Function<C, Optional<O>> {}

  private abstract class ObserverOperation<O> extends ObserverContext<E> {
    ObserverGetter<C, O> observerGetter;

    ObserverOperation(ObserverGetter<C, O> observerGetter) {
      this(observerGetter, RpcServer.getRequestUser());
    }

    ObserverOperation(ObserverGetter<C, O> observerGetter, User user) {
      super(user);
      this.observerGetter = observerGetter;
    }

    abstract void callObserver() throws IOException;
    protected void postEnvCall() {}
  }

  // Can't derive ObserverOperation from ObserverOperationWithResult (R = Void) because then all
  // ObserverCaller implementations will have to have a return statement.
  // O = observer, E = environment, C = coprocessor, R = result type
  public abstract class ObserverOperationWithoutResult<O> extends ObserverOperation<O> {
    protected abstract void call(O observer) throws IOException;

    public ObserverOperationWithoutResult(ObserverGetter<C, O> observerGetter) {
      super(observerGetter);
    }

    public ObserverOperationWithoutResult(ObserverGetter<C, O> observerGetter, User user) {
      super(observerGetter, user);
    }

    /**
     * In case of coprocessors which have many kinds of observers (for eg, {@link RegionCoprocessor}
     * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all
     * observers, in which case they will return null for that observer's getter.
     * We simply ignore such cases.
     */
    @Override
    void callObserver() throws IOException {
      Optional<O> observer = observerGetter.apply(getEnvironment().getInstance());
      if (observer.isPresent()) {
        call(observer.get());
      }
    }
  }

  public abstract class ObserverOperationWithResult<O, R> extends ObserverOperation<O> {
    protected abstract R call(O observer) throws IOException;

    private R result;

    public ObserverOperationWithResult(ObserverGetter<C, O> observerGetter) {
      super(observerGetter);
    }

    public ObserverOperationWithResult(ObserverGetter<C, O> observerGetter, User user) {
      super(observerGetter, user);
    }

    void setResult(final R result) {
      this.result = result;
    }

    protected R getResult() {
      return this.result;
    }

    void callObserver() throws IOException {
      Optional<O> observer = observerGetter.apply(getEnvironment().getInstance());
      if (observer.isPresent()) {
        result = call(observer.get());
      }
    }
  }

  //////////////////////////////////////////////////////////////////////////////////////////
  // Functions to execute observer hooks and handle results (if any)
  //////////////////////////////////////////////////////////////////////////////////////////
  protected <O, R> R execOperationWithResult(final R defaultValue,
      final ObserverOperationWithResult<O, R> observerOperation) throws IOException {
    if (observerOperation == null) {
      return defaultValue;
    }
    observerOperation.setResult(defaultValue);
    execOperation(observerOperation);
    return observerOperation.getResult();
  }

  // what does bypass mean?
  protected <O, R> R execOperationWithResult(final boolean ifBypass, final R defaultValue,
      final ObserverOperationWithResult<O, R> observerOperation) throws IOException {
    if (observerOperation == null) {
      return ifBypass ? null : defaultValue;
    } else {
      observerOperation.setResult(defaultValue);
      boolean bypass = execOperation(true, observerOperation);
      R result = observerOperation.getResult();
      return bypass == ifBypass ? result : null;
    }
  }

  protected <O> boolean execOperation(final ObserverOperation<O> observerOperation)
      throws IOException {
    return execOperation(true, observerOperation);
  }

  protected <O> boolean execOperation(final boolean earlyExit,
      final ObserverOperation<O> observerOperation) throws IOException {
    if (observerOperation == null) return false;
    boolean bypass = false;
    List<E> envs = coprocEnvironments.get();
    for (E env : envs) {
      observerOperation.prepare(env);
      Thread currentThread = Thread.currentThread();
      ClassLoader cl = currentThread.getContextClassLoader();
      try {
        currentThread.setContextClassLoader(env.getClassLoader());
        observerOperation.callObserver();
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      } finally {
        currentThread.setContextClassLoader(cl);
      }
      bypass |= observerOperation.shouldBypass();
      if (earlyExit && observerOperation.shouldComplete()) {
        break;
      }
      observerOperation.postEnvCall();
    }
    return bypass;
  }

  /**
   * Coprocessor classes can be configured in any order, based on that priority is set and
   * chained in a sorted order. Should be used for preStop*() hooks i.e. when master/regionserver is
   * going down. This function first calls coprocessor methods (using ObserverOperation.call())
   * and then shuts down the environment in postEnvCall(). <br>
   * Need to execute all coprocessor methods first then postEnvCall(), otherwise some coprocessors
   * may remain shutdown if any exception occurs during next coprocessor execution which prevent
   * master/regionserver stop or cluster shutdown. (Refer:
   * <a href="https://issues.apache.org/jira/browse/HBASE-16663">HBASE-16663</a>
   * @return true if bypass coprocessor execution, false if not.
   * @throws IOException
   */
  protected <O> boolean execShutdown(final ObserverOperation<O> observerOperation)
      throws IOException {
    if (observerOperation == null) return false;
    boolean bypass = false;
    List<E> envs = coprocEnvironments.get();
    // Iterate the coprocessors and execute ObserverOperation's call()
    for (E env : envs) {
      observerOperation.prepare(env);
      Thread currentThread = Thread.currentThread();
      ClassLoader cl = currentThread.getContextClassLoader();
      try {
        currentThread.setContextClassLoader(env.getClassLoader());
        observerOperation.callObserver();
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      } finally {
        currentThread.setContextClassLoader(cl);
      }
      bypass |= observerOperation.shouldBypass();
      if (observerOperation.shouldComplete()) {
        break;
      }
    }

    // Iterate the coprocessors and execute ObserverOperation's postEnvCall()
    for (E env : envs) {
      observerOperation.prepare(env);
      observerOperation.postEnvCall();
    }
    return bypass;
  }

}
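The ObserverGetter/ObserverOperation machinery above is what lets one host drive many observer types: each hook supplies a getter, and coprocessors that do not expose that observer type are silently skipped. A self-contained toy model of that dispatch (names are invented; the real getters are method references like RegionCoprocessor::getRegionObserver in the concrete hosts):

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;

// Toy model: a "host" walks its loaded "coprocessors", asks each one via the
// getter whether it exposes the observer type for this hook, and calls only
// the ones that do -- endpoint-only coprocessors fall through untouched.
final class DispatchDemo {
  interface Observer { void preFoo(); }
  interface Coproc {
    default Optional<Observer> getObserver() { return Optional.empty(); }
  }

  static void execOperation(List<Coproc> loaded,
      Function<Coproc, Optional<Observer>> observerGetter) {
    for (Coproc c : loaded) {
      observerGetter.apply(c).ifPresent(Observer::preFoo);  // skip non-observers
    }
  }

  public static void main(String[] args) {
    Coproc endpointOnly = new Coproc() {};   // no observer: never called
    Coproc observing = new Coproc() {
      @Override public Optional<Observer> getObserver() {
        return Optional.of(() -> System.out.println("preFoo ran"));
      }
    };
    execOperation(Arrays.asList(endpointOnly, observing), Coproc::getObserver);
  }
}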
@ -26,7 +26,9 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
/**
 * Coprocessor endpoints providing protobuf services should implement this
 * interface and return the {@link Service} instance via {@link #getService()}.
+ * @deprecated Since 2.0. Will be removed in 3.0
 */
+@Deprecated
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface CoprocessorService {
@ -0,0 +1,86 @@
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import com.google.protobuf.Service;
import org.apache.yetus.audience.InterfaceAudience;

import java.util.Optional;

/**
 * Classes to help maintain backward compatibility with now deprecated {@link CoprocessorService}
 * and {@link SingletonCoprocessorService}.
 * From 2.0 onwards, implementors of coprocessor service should also implement the relevant
 * coprocessor class (For eg {@link MasterCoprocessor} for coprocessor service in master), and
 * override get*Service() method to return the {@link com.google.protobuf.Service} object.
 * To maintain backward compatibility with 1.0 implementation, we'll wrap implementation of
 * CoprocessorService/SingletonCoprocessorService in the new
 * {Master, Region, RegionServer}Coprocessor class.
 * Since there is no backward compatibility guarantee for Observers, we leave get*Observer() to
 * default which returns null.
 * This approach to maintain backward compatibility seems cleaner and more explicit.
 */
@InterfaceAudience.Private
@Deprecated
public class CoprocessorServiceBackwardCompatiblity {

  static public class MasterCoprocessorService implements MasterCoprocessor {

    CoprocessorService service;

    public MasterCoprocessorService(CoprocessorService service) {
      this.service = service;
    }

    @Override
    public Optional<Service> getService() {
      return Optional.of(service.getService());
    }
  }

  static public class RegionCoprocessorService implements RegionCoprocessor {

    CoprocessorService service;

    public RegionCoprocessorService(CoprocessorService service) {
      this.service = service;
    }

    @Override
    public Optional<Service> getService() {
      return Optional.of(service.getService());
    }
  }

  static public class RegionServerCoprocessorService implements RegionServerCoprocessor {

    SingletonCoprocessorService service;

    public RegionServerCoprocessorService(SingletonCoprocessorService service) {
      this.service = service;
    }

    @Override
    public Optional<Service> getService() {
      return Optional.of(service.getService());
    }
  }
}
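Host-side, these wrappers slot into checkAndGetInstance(): a new-style coprocessor passes through, while a legacy service-only class gets wrapped so the rest of the host only ever sees the new interface. A simplified sketch of that branch for the master host (the real logic lives in MasterCoprocessorHost; this rendering is condensed):

// Sketch of the compatibility branch taken while loading a class.
public MasterCoprocessor checkAndGetInstance(Class<?> implClass)
    throws InstantiationException, IllegalAccessException {
  if (MasterCoprocessor.class.isAssignableFrom(implClass)) {
    return (MasterCoprocessor) implClass.newInstance();
  } else if (CoprocessorService.class.isAssignableFrom(implClass)) {
    // Legacy 1.x endpoint: wrap it so its Service keeps working on 2.0.
    return new CoprocessorServiceBackwardCompatiblity.MasterCoprocessorService(
        (CoprocessorService) implClass.newInstance());
  }
  return null;  // unsupported type: caller logs the failure and skips it
}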
@ -50,7 +50,7 @@ import com.google.protobuf.Service;
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
-public interface EndpointObserver extends Coprocessor {
+public interface EndpointObserver {

  /**
   * Called before an Endpoint service method is invoked.
@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

import java.util.Optional;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface MasterCoprocessor extends Coprocessor {
  default Optional<MasterObserver> getMasterObserver() {
    return Optional.empty();
  }
}
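To make the composed contract concrete, a minimal sketch of a 2.0-style master coprocessor. AuditMasterCoprocessor is a hypothetical name, not a class in this patch:

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Composition, not inheritance: the coprocessor *has* an observer, and
// hands it to the host through the getter instead of *being* one.
public class AuditMasterCoprocessor implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this); // expose our observer component to the host
  }

  @Override
  public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // react to master events here
  }
}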
@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment {
public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment<MasterCoprocessor> {
  /** @return reference to the HMaster services */
  MasterServices getMasterServices();
@ -77,7 +77,7 @@ import org.apache.yetus.audience.InterfaceStability;
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface MasterObserver extends Coprocessor {
public interface MasterObserver {
  /**
   * Called before a new table is created by
   * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create
@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coprocessor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;

@ -77,8 +77,7 @@ import com.google.protobuf.Service;
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public class MultiRowMutationEndpoint extends MultiRowMutationService implements
    CoprocessorService, Coprocessor {
public class MultiRowMutationEndpoint extends MultiRowMutationService implements RegionCoprocessor {
  private RegionCoprocessorEnvironment env;
  @Override
  public void mutateRows(RpcController controller, MutateRowsRequest request,

@ -120,10 +119,9 @@ CoprocessorService, Coprocessor {
    done.run(response);
  }

  @Override
  public Service getService() {
    return this;
  public Optional<Service> getService() {
    return Optional.of(this);
  }

  /**
@ -116,13 +116,13 @@ public class ObserverContext<E extends CoprocessorEnvironment> {
   * @param env The coprocessor environment to set
   * @param context An existing ObserverContext instance to use, or <code>null</code>
   *     to create a new instance
   * @param <T> The environment type for the context
   * @param <E> The environment type for the context
   * @return An instance of <code>ObserverContext</code> with the environment set
   */
  @Deprecated
  // TODO: Remove this method, ObserverContext should not depend on RpcServer
  public static <T extends CoprocessorEnvironment> ObserverContext<T> createAndPrepare(
      T env, ObserverContext<T> context) {
  public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(
      E env, ObserverContext<E> context) {
    if (context == null) {
      context = new ObserverContext<>(RpcServer.getRequestUser());
    }

@ -140,11 +140,11 @@ public class ObserverContext<E extends CoprocessorEnvironment> {
   * @param context An existing ObserverContext instance to use, or <code>null</code>
   *     to create a new instance
   * @param user The requesting caller for the execution context
   * @param <T> The environment type for the context
   * @param <E> The environment type for the context
   * @return An instance of <code>ObserverContext</code> with the environment set
   */
  public static <T extends CoprocessorEnvironment> ObserverContext<T> createAndPrepare(
      T env, ObserverContext<T> context, User user) {
  public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(
      E env, ObserverContext<E> context, User user) {
    if (context == null) {
      context = new ObserverContext<>(user);
    }
@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

import java.util.Optional;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface RegionCoprocessor extends Coprocessor {

  default Optional<RegionObserver> getRegionObserver() {
    return Optional.empty();
  }

  default Optional<EndpointObserver> getEndpointObserver() {
    return Optional.empty();
  }

  default Optional<BulkLoadObserver> getBulkLoadObserver() {
    return Optional.empty();
  }
}
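A quick sketch of the Region-side contract in use. FlushLoggingCoprocessor is a hypothetical class; note that the getters a coprocessor does not override simply keep their Optional.empty() defaults, so one class can expose some, all, or none of the observer types:

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class FlushLoggingCoprocessor implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this); // getEndpointObserver()/getBulkLoadObserver() stay empty
  }

  @Override
  public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
    // called after a memstore flush completes
  }
}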
@ -32,7 +32,7 @@ import org.apache.yetus.audience.InterfaceStability;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment {
public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment<RegionCoprocessor> {
  /** @return the region associated with this coprocessor */
  Region getRegion();

@ -61,6 +61,4 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment {
  // so we do not want to allow coprocessors to export metrics at the region level. We can allow
  // getMetricRegistryForTable() to allow coprocessors to track metrics per-table, per-regionserver.
  MetricRegistry getMetricRegistryForRegionServer();

}
@ -99,7 +99,7 @@ import org.apache.yetus.audience.InterfaceStability;
// TODO as method signatures need to break, update to
// ObserverContext<? extends RegionCoprocessorEnvironment>
// so we can use additional environment state that isn't exposed to coprocessors.
public interface RegionObserver extends Coprocessor {
public interface RegionObserver {
  /** Mutation type for postMutationBeforeWAL hook */
  enum MutationType {
    APPEND, INCREMENT
@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

import java.util.Optional;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface RegionServerCoprocessor extends Coprocessor {
  default Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.empty();
  }
}
@ -27,7 +27,8 @@ import org.apache.yetus.audience.InterfaceStability;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface RegionServerCoprocessorEnvironment extends CoprocessorEnvironment {
public interface RegionServerCoprocessorEnvironment
    extends CoprocessorEnvironment<RegionServerCoprocessor> {
  /**
   * Gets the region server services.
   *
@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceStability;
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface RegionServerObserver extends Coprocessor {
public interface RegionServerObserver {
  /**
   * Called before stopping region server.
   * @param ctx the environment to interact with the framework and region server.
@ -26,7 +26,9 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
/**
 * Coprocessor endpoints registered once per server and providing protobuf services should
 * implement this interface and return the {@link Service} instance via {@link #getService()}.
 * @deprecated Since 2.0. Will be removed in 3.0.
 */
@Deprecated
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface SingletonCoprocessorService {
@ -0,0 +1,36 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.coprocessor;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

import java.util.Optional;

/**
 * WALCoprocessors don't support loading services using {@link #getService()}.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface WALCoprocessor extends Coprocessor {
  Optional<WALObserver> getWALObserver();
}
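Unlike the other three interfaces, getWALObserver() has no default here: a WAL coprocessor exists only to supply an observer, so it must return one. A minimal sketch, with a hypothetical class name:

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.WALObserver;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

public class WalAuditCoprocessor implements WALCoprocessor, WALObserver {

  @Override
  public Optional<WALObserver> getWALObserver() {
    return Optional.of(this); // mandatory: no Optional.empty() default on WALCoprocessor
  }

  @Override
  public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
    // inspect the edit after it has been written to the WAL
  }
}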
@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.wal.WAL;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface WALCoprocessorEnvironment extends CoprocessorEnvironment {
public interface WALCoprocessorEnvironment extends CoprocessorEnvironment<WALCoprocessor> {
  /** @return reference to the region server's WAL */
  WAL getWAL();
@ -66,7 +66,7 @@ import org.apache.yetus.audience.InterfaceStability;
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface WALObserver extends Coprocessor {
public interface WALObserver {
  /**
   * Called before a {@link WALEdit}
   * is written to WAL.
@ -181,9 +181,8 @@ To implement an Endpoint, you need to:
<a href="https://developers.google.com/protocol-buffers/docs/proto#services">protocol buffer guide</a>
for more details on defining services.</li>
<li>Generate the Service and Message code using the protoc compiler</li>
<li>Implement the generated Service interface in your coprocessor class and implement the
<code>CoprocessorService</code> interface. The <code>CoprocessorService.getService()</code>
method should return a reference to the Endpoint's protocol buffer Service instance.
<li>Implement the generated Service interface and override the get*Service() method in the
relevant Coprocessor to return a reference to the Endpoint's protocol buffer Service instance.
</ul>
<p>
For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample
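A sketch of that last step under the new design. RowCountProtos.RowCountService, CountRequest and CountResponse stand in for whatever types the protoc compiler generated from your .proto file; none of them are added by this patch:

import java.util.Optional;

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;

public class RowCountEndpoint implements RegionCoprocessor,
    RowCountProtos.RowCountService.Interface {

  @Override
  public Optional<Service> getService() {
    // Publish the generated Service; the host registers it with the region.
    return Optional.of(RowCountProtos.RowCountService.newReflectiveService(this));
  }

  @Override
  public void getRowCount(RpcController controller, RowCountProtos.CountRequest request,
      RpcCallback<RowCountProtos.CountResponse> done) {
    // scan the region and complete the callback with a CountResponse
  }
}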
File diff suppressed because it is too large
@ -17,6 +17,7 @@
package org.apache.hadoop.hbase.quotas;

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;

@ -24,6 +25,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

@ -35,7 +37,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 * are deleted.
 */
@InterfaceAudience.Private
public class MasterSpaceQuotaObserver implements MasterObserver {
public class MasterSpaceQuotaObserver implements MasterCoprocessor, MasterObserver {
  public static final String REMOVE_QUOTA_ON_TABLE_DELETE = "hbase.quota.remove.on.table.delete";
  public static final boolean REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT = true;

@ -43,6 +45,11 @@ public class MasterSpaceQuotaObserver implements MasterObserver {
  private Configuration conf;
  private boolean quotasEnabled = false;

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void start(CoprocessorEnvironment ctx) throws IOException {
    this.cpEnv = ctx;
File diff suppressed because it is too large
@ -19,33 +19,29 @@
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.Comparator;
import java.util.List;

import org.apache.commons.lang3.ClassUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.coprocessor.SingletonCoprocessorService;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
import org.apache.hadoop.hbase.security.User;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public class RegionServerCoprocessorHost extends
    CoprocessorHost<RegionServerCoprocessorHost.RegionServerEnvironment> {
    CoprocessorHost<RegionServerCoprocessor, RegionServerCoprocessorEnvironment> {

  private static final Log LOG = LogFactory.getLog(RegionServerCoprocessorHost.class);
@ -70,242 +66,149 @@ public class RegionServerCoprocessorHost extends
  }

  @Override
  public RegionServerEnvironment createEnvironment(Class<?> implClass,
      Coprocessor instance, int priority, int sequence, Configuration conf) {
    return new RegionServerEnvironment(implClass, instance, priority,
        sequence, conf, this.rsServices);
  public RegionServerEnvironment createEnvironment(
      RegionServerCoprocessor instance, int priority, int sequence, Configuration conf) {
    return new RegionServerEnvironment(instance, priority, sequence, conf, this.rsServices);
  }

  @Override
  public RegionServerCoprocessor checkAndGetInstance(Class<?> implClass)
      throws InstantiationException, IllegalAccessException {
    if (RegionServerCoprocessor.class.isAssignableFrom(implClass)) {
      return (RegionServerCoprocessor)implClass.newInstance();
    } else if (SingletonCoprocessorService.class.isAssignableFrom(implClass)) {
      // For backward compatibility with old SingletonCoprocessorService implementations
      // which don't extend RegionServerCoprocessor.
      return new CoprocessorServiceBackwardCompatiblity.RegionServerCoprocessorService(
          (SingletonCoprocessorService)implClass.newInstance());
    } else {
      LOG.error(implClass.getName() + " is not of type RegionServerCoprocessor. Check the "
          + "configuration " + CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY);
      return null;
    }
  }

  private ObserverGetter<RegionServerCoprocessor, RegionServerObserver> rsObserverGetter =
      RegionServerCoprocessor::getRegionServerObserver;

  abstract class RegionServerObserverOperation extends
      ObserverOperationWithoutResult<RegionServerObserver> {
    public RegionServerObserverOperation() {
      super(rsObserverGetter);
    }

    public RegionServerObserverOperation(User user) {
      super(rsObserverGetter, user);
    }
  }

  //////////////////////////////////////////////////////////////////////////////////////////////////
  // RegionServerObserver operations
  //////////////////////////////////////////////////////////////////////////////////////////////////

  public void preStop(String message, User user) throws IOException {
    // While stopping the region server, all coprocessor methods should be executed first;
    // only then should the coprocessors be cleaned up.
    execShutdown(coprocessors.isEmpty() ? null : new CoprocessorOperation(user) {
    execShutdown(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation(user) {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.preStopRegionServer(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.preStopRegionServer(this);
      }

      @Override
      public void postEnvCall(RegionServerEnvironment env) {
      public void postEnvCall() {
        // invoke coprocessor stop method
        shutdown(env);
        shutdown(this.getEnvironment());
      }
    });
  }

  public void preRollWALWriterRequest() throws IOException {
    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.preRollWALWriterRequest(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.preRollWALWriterRequest(this);
      }
    });
  }

  public void postRollWALWriterRequest() throws IOException {
    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.postRollWALWriterRequest(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.postRollWALWriterRequest(this);
      }
    });
  }

  public void preReplicateLogEntries()
      throws IOException {
    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.preReplicateLogEntries(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.preReplicateLogEntries(this);
      }
    });
  }

  public void postReplicateLogEntries()
      throws IOException {
    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.postReplicateLogEntries(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.postReplicateLogEntries(this);
      }
    });
  }

  public ReplicationEndpoint postCreateReplicationEndPoint(final ReplicationEndpoint endpoint)
      throws IOException {
    return execOperationWithResult(endpoint, coprocessors.isEmpty() ? null
        : new CoprocessOperationWithResult<ReplicationEndpoint>() {
          @Override
          public void call(RegionServerObserver oserver,
              ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
            setResult(oserver.postCreateReplicationEndPoint(ctx, getResult()));
          }
        });
    return execOperationWithResult(endpoint, coprocEnvironments.isEmpty() ? null :
        new ObserverOperationWithResult<RegionServerObserver, ReplicationEndpoint>(
            rsObserverGetter) {
          @Override
          public ReplicationEndpoint call(RegionServerObserver observer) throws IOException {
            return observer.postCreateReplicationEndPoint(this, getResult());
          }
        });
  }

  public void preClearCompactionQueues() throws IOException {
    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.preClearCompactionQueues(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.preClearCompactionQueues(this);
      }
    });
  }

  public void postClearCompactionQueues() throws IOException {
    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
    execOperation(coprocEnvironments.isEmpty() ? null : new RegionServerObserverOperation() {
      @Override
      public void call(RegionServerObserver oserver,
          ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
        oserver.postClearCompactionQueues(ctx);
      public void call(RegionServerObserver observer) throws IOException {
        observer.postClearCompactionQueues(this);
      }
    });
  }

  private <T> T execOperationWithResult(final T defaultValue,
      final CoprocessOperationWithResult<T> ctx) throws IOException {
    if (ctx == null)
      return defaultValue;
    ctx.setResult(defaultValue);
    execOperation(ctx);
    return ctx.getResult();
  }

  private static abstract class CoprocessorOperation
      extends ObserverContext<RegionServerCoprocessorEnvironment> {
    public CoprocessorOperation() {
      this(RpcServer.getRequestUser());
    }

    public CoprocessorOperation(User user) {
      super(user);
    }

    public abstract void call(RegionServerObserver oserver,
        ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException;

    public void postEnvCall(RegionServerEnvironment env) {
    }
  }

  private static abstract class CoprocessOperationWithResult<T> extends CoprocessorOperation {
    private T result = null;

    public void setResult(final T result) {
      this.result = result;
    }

    public T getResult() {
      return this.result;
    }
  }

  private boolean execOperation(final CoprocessorOperation ctx) throws IOException {
    if (ctx == null) return false;
    boolean bypass = false;
    List<RegionServerEnvironment> envs = coprocessors.get();
    for (int i = 0; i < envs.size(); i++) {
      RegionServerEnvironment env = envs.get(i);
      if (env.getInstance() instanceof RegionServerObserver) {
        ctx.prepare(env);
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        try {
          currentThread.setContextClassLoader(env.getClassLoader());
          ctx.call((RegionServerObserver)env.getInstance(), ctx);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        } finally {
          currentThread.setContextClassLoader(cl);
        }
        bypass |= ctx.shouldBypass();
        if (ctx.shouldComplete()) {
          break;
        }
      }
      ctx.postEnvCall(env);
    }
    return bypass;
  }

  /**
   * RegionServer coprocessor classes can be configured in any order; priority is set based on
   * that order and coprocessors are chained in sorted order. For preStop(), coprocessor methods
   * are invoked in call() and the environment is shut down in postEnvCall(). <br>
   * All coprocessor methods need to be executed first and postEnvCall() only afterwards;
   * otherwise some coprocessors may never be shut down if an exception occurs during a later
   * coprocessor's execution, which would prevent the RegionServer from stopping. (Refer:
   * <a href="https://issues.apache.org/jira/browse/HBASE-16663">HBASE-16663</a>)
   * @param ctx CoprocessorOperation
   * @return true if coprocessor execution should be bypassed, false if not.
   * @throws IOException
   */
  private boolean execShutdown(final CoprocessorOperation ctx) throws IOException {
    if (ctx == null) return false;
    boolean bypass = false;
    List<RegionServerEnvironment> envs = coprocessors.get();
    int envsSize = envs.size();
    // Iterate the coprocessors and execute CoprocessorOperation's call()
    for (int i = 0; i < envsSize; i++) {
      RegionServerEnvironment env = envs.get(i);
      if (env.getInstance() instanceof RegionServerObserver) {
        ctx.prepare(env);
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        try {
          currentThread.setContextClassLoader(env.getClassLoader());
          ctx.call((RegionServerObserver) env.getInstance(), ctx);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        } finally {
          currentThread.setContextClassLoader(cl);
        }
        bypass |= ctx.shouldBypass();
        if (ctx.shouldComplete()) {
          break;
        }
      }
    }

    // Iterate the coprocessors and execute CoprocessorOperation's postEnvCall()
    for (int i = 0; i < envsSize; i++) {
      RegionServerEnvironment env = envs.get(i);
      ctx.postEnvCall(env);
    }
    return bypass;
  }

  /**
   * Coprocessor environment extension providing access to region server
   * related services.
   */
  static class RegionServerEnvironment extends CoprocessorHost.Environment
  private static class RegionServerEnvironment extends BaseEnvironment<RegionServerCoprocessor>
      implements RegionServerCoprocessorEnvironment {
    private final RegionServerServices regionServerServices;
    private final MetricRegistry metricRegistry;

    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BC_UNCONFIRMED_CAST",
        justification="Intentional; FB has trouble detecting isAssignableFrom")
    public RegionServerEnvironment(final Class<?> implClass,
        final Coprocessor impl, final int priority, final int seq,
        final Configuration conf, final RegionServerServices services) {
    public RegionServerEnvironment(final RegionServerCoprocessor impl, final int priority,
        final int seq, final Configuration conf, final RegionServerServices services) {
      super(impl, priority, seq, conf);
      this.regionServerServices = services;
      for (Object itf : ClassUtils.getAllInterfaces(implClass)) {
        Class<?> c = (Class<?>) itf;
        if (SingletonCoprocessorService.class.isAssignableFrom(c)) {// FindBugs: BC_UNCONFIRMED_CAST
          this.regionServerServices.registerService(
              ((SingletonCoprocessorService) impl).getService());
          break;
        }
      }
      impl.getService().ifPresent(regionServerServices::registerService);
      this.metricRegistry =
          MetricsCoprocessor.createRegistryForRSCoprocessor(implClass.getName());
          MetricsCoprocessor.createRegistryForRSCoprocessor(impl.getClass().getName());
    }

    @Override
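The refactor above replaces each host's hand-rolled dispatch loop with a generic operation built around an observer getter. A condensed, self-contained sketch of that pattern — names simplified, not the exact HBase classes:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Function;

// Condensed model of the new dispatch: the host is generic over its
// coprocessor type C, and each operation carries a getter that extracts
// the observer component O from a coprocessor, if present.
class MiniHost<C> {
  interface ObserverOperation<O> {
    void call(O observer) throws IOException;
  }

  private final List<C> coprocessors = new ArrayList<>();

  void load(C coprocessor) {
    coprocessors.add(coprocessor);
  }

  // Coprocessors that don't expose the requested observer are skipped, which
  // is what makes "Coprocessor has Observer" composable: one class may expose
  // some, all, or none of the observer types.
  <O> void execOperation(Function<C, Optional<O>> observerGetter,
      ObserverOperation<O> op) throws IOException {
    for (C cp : coprocessors) {
      Optional<O> observer = observerGetter.apply(cp);
      if (observer.isPresent()) {
        op.call(observer.get());
      }
    }
  }
}

With RegionServerCoprocessor::getRegionServerObserver as the getter, a hook like preStop() above becomes a single execOperation call; the real ObserverOperation additionally carries the ObserverContext, the requesting user, and the context-class-loader switching seen in the deleted loops.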
@ -319,32 +222,9 @@ public class RegionServerCoprocessorHost extends
    }

    @Override
    protected void shutdown() {
    public void shutdown() {
      super.shutdown();
      MetricsCoprocessor.removeRegistry(metricRegistry);
    }
  }

  /**
   * Environment priority comparator. Coprocessors are chained in sorted
   * order.
   */
  static class EnvironmentPriorityComparator implements
      Comparator<CoprocessorEnvironment> {
    @Override
    public int compare(final CoprocessorEnvironment env1,
        final CoprocessorEnvironment env2) {
      if (env1.getPriority() < env2.getPriority()) {
        return -1;
      } else if (env1.getPriority() > env2.getPriority()) {
        return 1;
      }
      if (env1.getLoadSequence() < env2.getLoadSequence()) {
        return -1;
      } else if (env1.getLoadSequence() > env2.getLoadSequence()) {
        return 1;
      }
      return 0;
    }
  }
}
@ -33,7 +33,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
@ -138,38 +140,17 @@ public class SecureBulkLoadManager {

  public String prepareBulkLoad(final Region region, final PrepareBulkLoadRequest request)
      throws IOException {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers(region);
    region.getCoprocessorHost().prePrepareBulkLoad(getActiveUser());

    if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) {
      ObserverContext<RegionCoprocessorEnvironment> ctx = new ObserverContext<>(getActiveUser());
      ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost()
          .findCoprocessorEnvironment(BulkLoadObserver.class).get(0));

      for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.prePrepareBulkLoad(ctx);
      }
    }

    String bulkToken =
        createStagingDir(baseStagingDir, getActiveUser(), region.getTableDescriptor().getTableName())
            .toString();
    String bulkToken = createStagingDir(baseStagingDir, getActiveUser(),
        region.getTableDescriptor().getTableName()).toString();

    return bulkToken;
  }

  public void cleanupBulkLoad(final Region region, final CleanupBulkLoadRequest request)
      throws IOException {
    List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers(region);

    if (bulkLoadObservers != null && bulkLoadObservers.size() != 0) {
      ObserverContext<RegionCoprocessorEnvironment> ctx = new ObserverContext<>(getActiveUser());
      ctx.prepare((RegionCoprocessorEnvironment) region.getCoprocessorHost()
          .findCoprocessorEnvironment(BulkLoadObserver.class).get(0));

      for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
        bulkLoadObserver.preCleanupBulkLoad(ctx);
      }
    }
    region.getCoprocessorHost().preCleanupBulkLoad(getActiveUser());

    Path path = new Path(request.getBulkToken());
    if (!fs.delete(path, true)) {
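The observer-discovery trickery being deleted here now lives behind a single host call. Roughly the shape of the new RegionCoprocessorHost.prePrepareBulkLoad() under the BulkLoadObserver wiring added by this patch — a paraphrase, not the literal method body:

// Paraphrased: the host, not SecureBulkLoadManager, walks its environments
// and dispatches to any coprocessor exposing a BulkLoadObserver.
public void prePrepareBulkLoad(User user) throws IOException {
  execOperation(coprocEnvironments.isEmpty() ? null :
      new BulkLoadObserverOperation(user) {
        @Override
        protected void call(BulkLoadObserver observer) throws IOException {
          observer.prePrepareBulkLoad(this);
        }
      });
}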
@ -275,13 +256,6 @@ public class SecureBulkLoadManager {
    return map;
  }

  private List<BulkLoadObserver> getBulkLoadObservers(Region region) {
    List<BulkLoadObserver> coprocessorList =
        region.getCoprocessorHost().findCoprocessors(BulkLoadObserver.class);

    return coprocessorList;
  }

  private Path createStagingDir(Path baseDir,
      User user,
      TableName tableName) throws IOException {
@ -21,22 +21,23 @@
package org.apache.hadoop.hbase.regionserver.wal;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.WALObserver;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Implements the coprocessor environment and runtime support for coprocessors
@ -44,12 +45,13 @@ import org.apache.hadoop.hbase.wal.WALKey;
 */
@InterfaceAudience.Private
public class WALCoprocessorHost
    extends CoprocessorHost<WALCoprocessorHost.WALEnvironment> {
    extends CoprocessorHost<WALCoprocessor, WALCoprocessorEnvironment> {
  private static final Log LOG = LogFactory.getLog(WALCoprocessorHost.class);

  /**
   * Encapsulation of the environment of each coprocessor
   */
  static class WALEnvironment extends CoprocessorHost.Environment
  static class WALEnvironment extends BaseEnvironment<WALCoprocessor>
      implements WALCoprocessorEnvironment {

    private final WAL wal;
@ -63,19 +65,18 @@ public class WALCoprocessorHost

    /**
     * Constructor
     * @param implClass - not used
     * @param impl the coprocessor instance
     * @param priority chaining priority
     * @param seq load sequence
     * @param conf configuration
     * @param wal WAL
     */
    public WALEnvironment(Class<?> implClass, final Coprocessor impl,
        final int priority, final int seq, final Configuration conf,
        final WAL wal) {
    private WALEnvironment(final WALCoprocessor impl, final int priority, final int seq,
        final Configuration conf, final WAL wal) {
      super(impl, priority, seq, conf);
      this.wal = wal;
      this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor(implClass.getName());
      this.metricRegistry = MetricsCoprocessor.createRegistryForWALCoprocessor(
          impl.getClass().getName());
    }

    @Override
@ -84,7 +85,7 @@ public class WALCoprocessorHost
    }

    @Override
    protected void shutdown() {
    public void shutdown() {
      super.shutdown();
      MetricsCoprocessor.removeRegistry(this.metricRegistry);
    }
@ -111,13 +112,34 @@ public class WALCoprocessorHost
  }

  @Override
  public WALEnvironment createEnvironment(final Class<?> implClass,
      final Coprocessor instance, final int priority, final int seq,
      final Configuration conf) {
    return new WALEnvironment(implClass, instance, priority, seq, conf,
        this.wal);
  public WALEnvironment createEnvironment(final WALCoprocessor instance, final int priority,
      final int seq, final Configuration conf) {
    return new WALEnvironment(instance, priority, seq, conf, this.wal);
  }

  @Override
  public WALCoprocessor checkAndGetInstance(Class<?> implClass)
      throws InstantiationException, IllegalAccessException {
    if (WALCoprocessor.class.isAssignableFrom(implClass)) {
      return (WALCoprocessor)implClass.newInstance();
    } else {
      LOG.error(implClass.getName() + " is not of type WALCoprocessor. Check the "
          + "configuration " + CoprocessorHost.WAL_COPROCESSOR_CONF_KEY);
      return null;
    }
  }

  private ObserverGetter<WALCoprocessor, WALObserver> walObserverGetter =
      WALCoprocessor::getWALObserver;

  abstract class WALObserverOperation extends
      ObserverOperationWithoutResult<WALObserver> {
    public WALObserverOperation() {
      super(walObserverGetter);
    }
  }

  /**
   * @param info
   * @param logKey
@ -127,32 +149,13 @@ public class WALCoprocessorHost
   */
  public boolean preWALWrite(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit)
      throws IOException {
    boolean bypass = false;
    if (this.coprocessors == null || this.coprocessors.isEmpty()) return bypass;
    ObserverContext<WALCoprocessorEnvironment> ctx = null;
    List<WALEnvironment> envs = coprocessors.get();
    for (int i = 0; i < envs.size(); i++) {
      WALEnvironment env = envs.get(i);
      if (env.getInstance() instanceof WALObserver) {
        final WALObserver observer = (WALObserver)env.getInstance();
        ctx = ObserverContext.createAndPrepare(env, ctx);
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        try {
          currentThread.setContextClassLoader(env.getClassLoader());
          observer.preWALWrite(ctx, info, logKey, logEdit);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        } finally {
          currentThread.setContextClassLoader(cl);
        }
        bypass |= ctx.shouldBypass();
        if (ctx.shouldComplete()) {
          break;
        }
    return execOperationWithResult(false, coprocEnvironments.isEmpty() ? null :
        new ObserverOperationWithResult<WALObserver, Boolean>(walObserverGetter) {
          @Override
          public Boolean call(WALObserver oserver) throws IOException {
            return oserver.preWALWrite(this, info, logKey, logEdit);
          }
      }
    return bypass;
        });
  }

  /**
@ -163,29 +166,12 @@ public class WALCoprocessorHost
   */
  public void postWALWrite(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit)
      throws IOException {
    if (this.coprocessors == null || this.coprocessors.isEmpty()) return;
    ObserverContext<WALCoprocessorEnvironment> ctx = null;
    List<WALEnvironment> envs = coprocessors.get();
    for (int i = 0; i < envs.size(); i++) {
      WALEnvironment env = envs.get(i);
      if (env.getInstance() instanceof WALObserver) {
        final WALObserver observer = (WALObserver)env.getInstance();
        ctx = ObserverContext.createAndPrepare(env, ctx);
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        try {
          currentThread.setContextClassLoader(env.getClassLoader());
          observer.postWALWrite(ctx, info, logKey, logEdit);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        } finally {
          currentThread.setContextClassLoader(cl);
        }
        if (ctx.shouldComplete()) {
          break;
        }
    execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
      @Override
      protected void call(WALObserver observer) throws IOException {
        observer.postWALWrite(this, info, logKey, logEdit);
      }
    }
    });
  }

  /**
@ -194,29 +180,12 @@ public class WALCoprocessorHost
   * @param newPath the path of the wal we are going to create
   */
  public void preWALRoll(Path oldPath, Path newPath) throws IOException {
    if (this.coprocessors == null || this.coprocessors.isEmpty()) return;
    ObserverContext<WALCoprocessorEnvironment> ctx = null;
    List<WALEnvironment> envs = coprocessors.get();
    for (int i = 0; i < envs.size(); i++) {
      WALEnvironment env = envs.get(i);
      if (env.getInstance() instanceof WALObserver) {
        final WALObserver observer = (WALObserver)env.getInstance();
        ctx = ObserverContext.createAndPrepare(env, ctx);
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        try {
          currentThread.setContextClassLoader(env.getClassLoader());
          observer.preWALRoll(ctx, oldPath, newPath);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        } finally {
          currentThread.setContextClassLoader(cl);
        }
        if (ctx.shouldComplete()) {
          break;
        }
    execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
      @Override
      protected void call(WALObserver observer) throws IOException {
        observer.preWALRoll(this, oldPath, newPath);
      }
    }
    });
  }

  /**
@ -225,28 +194,11 @@ public class WALCoprocessorHost
   * @param newPath the path of the wal we have created and now is the current
   */
  public void postWALRoll(Path oldPath, Path newPath) throws IOException {
    if (this.coprocessors == null || this.coprocessors.isEmpty()) return;
    ObserverContext<WALCoprocessorEnvironment> ctx = null;
    List<WALEnvironment> envs = coprocessors.get();
    for (int i = 0; i < envs.size(); i++) {
      WALEnvironment env = envs.get(i);
      if (env.getInstance() instanceof WALObserver) {
        final WALObserver observer = (WALObserver)env.getInstance();
        ctx = ObserverContext.createAndPrepare(env, ctx);
        Thread currentThread = Thread.currentThread();
        ClassLoader cl = currentThread.getContextClassLoader();
        try {
          currentThread.setContextClassLoader(env.getClassLoader());
          observer.postWALRoll(ctx, oldPath, newPath);
        } catch (Throwable e) {
          handleCoprocessorThrowable(env, e);
        } finally {
          currentThread.setContextClassLoader(cl);
        }
        if (ctx.shouldComplete()) {
          break;
        }
    execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
      @Override
      protected void call(WALObserver observer) throws IOException {
        observer.postWALRoll(this, oldPath, newPath);
      }
    }
    });
  }
}
@ -21,11 +21,13 @@ package org.apache.hadoop.hbase.replication.regionserver;

import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;

@ -40,9 +42,14 @@ import org.apache.hadoop.hbase.util.Pair;
 */

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class ReplicationObserver implements RegionObserver {
public class ReplicationObserver implements RegionCoprocessor, RegionObserver {
  private static final Log LOG = LogFactory.getLog(ReplicationObserver.class);

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
      final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
@ -28,6 +28,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

@ -74,13 +75,15 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;

@ -169,8 +172,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 * </p>
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class AccessController implements MasterObserver, RegionObserver, RegionServerObserver,
    AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver {
public class AccessController implements MasterCoprocessor, RegionCoprocessor,
    RegionServerCoprocessor, AccessControlService.Interface,
    MasterObserver, RegionObserver, RegionServerObserver, EndpointObserver, BulkLoadObserver {
  // TODO: encapsulate observer functions into separate class/sub-class.

  private static final Log LOG = LogFactory.getLog(AccessController.class);
@ -987,6 +992,39 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
    }
  }

  /*********************************** Observer/Service Getters ***********************************/
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public Optional<EndpointObserver> getEndpointObserver() {
    return Optional.of(this);
  }

  @Override
  public Optional<BulkLoadObserver> getBulkLoadObserver() {
    return Optional.of(this);
  }

  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public Optional<Service> getService() {
    return Optional.of(AccessControlProtos.AccessControlService.newReflectiveService(this));
  }

  /*********************************** Observer implementations ***********************************/

  @Override
  public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
      TableDescriptor desc, RegionInfo[] regions) throws IOException {
@ -2448,11 +2486,6 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
    done.run(response);
  }

  @Override
  public Service getService() {
    return AccessControlProtos.AccessControlService.newReflectiveService(this);
  }

  private Region getRegion(RegionCoprocessorEnvironment e) {
    return e.getRegion();
  }
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.security.access;

import java.io.IOException;
import java.util.Collection;
import java.util.Optional;
import java.util.regex.Matcher;

import org.apache.commons.io.FilenameUtils;

@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

@ -43,7 +45,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 * Master observer for restricting coprocessor assignments.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class CoprocessorWhitelistMasterObserver implements MasterObserver {
public class CoprocessorWhitelistMasterObserver implements MasterCoprocessor, MasterObserver {

  public static final String CP_COPROCESSOR_WHITELIST_PATHS_KEY =
      "hbase.coprocessor.region.whitelist.paths";

@ -51,6 +53,11 @@ public class CoprocessorWhitelistMasterObserver implements MasterObserver {
  private static final Log LOG = LogFactory
      .getLog(CoprocessorWhitelistMasterObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName, TableDescriptor htd) throws IOException {
@ -19,13 +19,12 @@
|
|||
package org.apache.hadoop.hbase.security.token;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Optional;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.Coprocessor;
|
||||
import org.apache.hadoop.hbase.CoprocessorEnvironment;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
|
||||
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
|
||||
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
|
||||
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
|
||||
import org.apache.hadoop.hbase.ipc.RpcServer;
|
||||
|
@ -42,6 +41,7 @@ import org.apache.hadoop.security.token.Token;
|
|||
import com.google.protobuf.RpcCallback;
|
||||
import com.google.protobuf.RpcController;
|
||||
import com.google.protobuf.Service;
|
||||
import org.apache.yetus.audience.InterfaceAudience;
|
||||
|
||||
/**
|
||||
* Provides a service for obtaining authentication tokens via the
|
||||
|
@ -49,7 +49,7 @@ import com.google.protobuf.Service;
|
|||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class TokenProvider implements AuthenticationProtos.AuthenticationService.Interface,
|
||||
Coprocessor, CoprocessorService {
|
||||
RegionCoprocessor {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(TokenProvider.class);
|
||||
|
||||
|
@ -96,8 +96,8 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService
|
|||
// AuthenticationService implementation
|
||||
|
||||
@Override
|
||||
public Service getService() {
|
||||
return AuthenticationProtos.AuthenticationService.newReflectiveService(this);
|
||||
public Optional<Service> getService() {
|
||||
return Optional.of(AuthenticationProtos.AuthenticationService.newReflectiveService(this));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -64,14 +65,13 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -101,7 +101,6 @@ import org.apache.hadoop.hbase.regionserver.OperationStatus;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
@@ -122,8 +121,9 @@ import com.google.protobuf.Service;
  * visibility labels
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class VisibilityController implements MasterObserver, RegionObserver,
-    VisibilityLabelsService.Interface, CoprocessorService {
+// TODO: break out Observer functions into separate class/sub-class.
+public class VisibilityController implements MasterCoprocessor, RegionCoprocessor,
+    VisibilityLabelsService.Interface, MasterObserver, RegionObserver {

   private static final Log LOG = LogFactory.getLog(VisibilityController.class);
   private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger."
@@ -176,10 +176,6 @@ public class VisibilityController implements MasterObserver, RegionObserver,
           + " accordingly.");
     }

-    if (env instanceof RegionServerCoprocessorEnvironment) {
-      throw new RuntimeException("Visibility controller should not be configured as "
-          + "'hbase.coprocessor.regionserver.classes'.");
-    }
     // Do not create for master CPs
     if (!(env instanceof MasterCoprocessorEnvironment)) {
       visibilityLabelService = VisibilityLabelServiceManager.getInstance()
@@ -192,6 +188,22 @@ public class VisibilityController implements MasterObserver, RegionObserver,

   }

+  /**************************** Observer/Service Getters ************************************/
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public Optional<Service> getService() {
+    return Optional.of(VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this));
+  }
+
   /********************************* Master related hooks **********************************/

   @Override
@@ -760,11 +772,6 @@ public class VisibilityController implements MasterObserver, RegionObserver,
     return rewriteCell;
   }

-  @Override
-  public Service getService() {
-    return VisibilityLabelsProtos.VisibilityLabelsService.newReflectiveService(this);
-  }
-
   @Override
   public boolean postScannerFilterRow(final ObserverContext<RegionCoprocessorEnvironment> e,
       final InternalScanner s, final Cell curRowCell, final boolean hasMore) throws IOException {
@@ -1086,35 +1093,6 @@ public class VisibilityController implements MasterObserver, RegionObserver,
     }
   }

-  /**
-   * A RegionServerObserver impl that provides the custom
-   * VisibilityReplicationEndpoint. This class should be configured as the
-   * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be
-   * replicated as string. The value for the configuration should be
-   * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'.
-   */
-  public static class VisibilityReplication implements RegionServerObserver {
-    private Configuration conf;
-    private VisibilityLabelService visibilityLabelService;
-
-    @Override
-    public void start(CoprocessorEnvironment env) throws IOException {
-      this.conf = env.getConfiguration();
-      visibilityLabelService = VisibilityLabelServiceManager.getInstance()
-          .getVisibilityLabelService(this.conf);
-    }
-
-    @Override
-    public void stop(CoprocessorEnvironment env) throws IOException {
-    }
-
-    @Override
-    public ReplicationEndpoint postCreateReplicationEndPoint(
-        ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint) {
-      return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService);
-    }
-  }
-
   /**
    * @param t
    * @return NameValuePair of the exception name to stringified version of exception.

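VisibilityController is the fullest example of the composition model: one class now exposes a RegionObserver, a MasterObserver, and a Service through three getters, and each host picks up only the piece it understands. The payoff is that a host can collect what it cares about without instanceof gymnastics. Roughly how a host-side consumer can unwrap the getters (illustrative only, not the actual CoprocessorHost code):

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.MasterObserver;

  // Hypothetical helper: gather the master observers that loaded coprocessors
  // chose to expose; coprocessors without one are simply skipped.
  public class ObserverCollector {
    public static List<MasterObserver> masterObservers(List<MasterCoprocessor> coprocs) {
      List<MasterObserver> observers = new ArrayList<>();
      for (MasterCoprocessor cp : coprocs) {
        cp.getMasterObserver().ifPresent(observers::add);
      }
      return observers;
    }
  }
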
@@ -0,0 +1,64 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.visibility;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
+import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
+
+import java.io.IOException;
+import java.util.Optional;
+
+/**
+ * A RegionServerObserver impl that provides the custom
+ * VisibilityReplicationEndpoint. This class should be configured as the
+ * 'hbase.coprocessor.regionserver.classes' for the visibility tags to be
+ * replicated as string. The value for the configuration should be
+ * 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'.
+ */
+public class VisibilityReplication implements RegionServerCoprocessor, RegionServerObserver {
+  private Configuration conf;
+  private VisibilityLabelService visibilityLabelService;
+
+  @Override
+  public void start(CoprocessorEnvironment env) throws IOException {
+    this.conf = env.getConfiguration();
+    visibilityLabelService = VisibilityLabelServiceManager.getInstance()
+        .getVisibilityLabelService(this.conf);
+  }
+
+  @Override
+  public void stop(CoprocessorEnvironment env) throws IOException {
+  }
+
+  @Override public Optional<RegionServerObserver> getRegionServerObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public ReplicationEndpoint postCreateReplicationEndPoint(
+      ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint) {
+    return new VisibilityReplicationEndpoint(endpoint, visibilityLabelService);
+  }
+}

@@ -23,12 +23,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.OperationStatus;

 import java.io.IOException;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;

 /**
@@ -58,9 +60,15 @@ import java.util.concurrent.atomic.AtomicLong;
  * 0 row(s) in 0.0050 seconds
  * </p>
  */
-public class WriteSinkCoprocessor implements RegionObserver {
+public class WriteSinkCoprocessor implements RegionCoprocessor, RegionObserver {
   private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class);
   private final AtomicLong ops = new AtomicLong();

+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
   private String regionName;

   @Override
@@ -68,7 +76,6 @@ public class WriteSinkCoprocessor implements RegionObserver {
     regionName = e.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString();
   }

-
   @Override
   public void preBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
       final MiniBatchOperationInProgress<Mutation> miniBatchOp)

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;

 import java.io.IOException;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;

 import org.apache.hadoop.conf.Configuration;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
@@ -159,13 +161,18 @@ public class HConnectionTestingUtility {
   /**
    * This coprocessor sleeps 2s at the first increment/append rpc call.
    */
-  public static class SleepAtFirstRpcCall implements RegionObserver {
+  public static class SleepAtFirstRpcCall implements RegionCoprocessor, RegionObserver {
     static final AtomicLong ct = new AtomicLong(0);
     static final String SLEEP_TIME_CONF_KEY =
         "hbase.coprocessor.SleepAtFirstRpcCall.sleepTime";
     static final long DEFAULT_SLEEP_TIME = 2000;
     static final AtomicLong sleepTime = new AtomicLong(DEFAULT_SLEEP_TIME);

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     public SleepAtFirstRpcCall() {
     }

@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.ForkJoinPool;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -35,6 +36,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -170,10 +172,15 @@ public class TestAsyncAdminBuilder {
     }
   }

-  public static class TestRpcTimeoutCoprocessor implements MasterObserver {
+  public static class TestRpcTimeoutCoprocessor implements MasterCoprocessor, MasterObserver {
     public TestRpcTimeoutCoprocessor() {
     }

+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
     @Override
     public void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
         String namespace) throws IOException {
@@ -181,12 +188,17 @@ public class TestAsyncAdminBuilder {
     }
   }

-  public static class TestOperationTimeoutCoprocessor implements MasterObserver {
+  public static class TestOperationTimeoutCoprocessor implements MasterCoprocessor, MasterObserver {
     AtomicLong sleepTime = new AtomicLong(0);

     public TestOperationTimeoutCoprocessor() {
     }

+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
         String namespace) throws IOException {
@@ -197,12 +209,17 @@ public class TestAsyncAdminBuilder {
     }
   }

-  public static class TestMaxRetriesCoprocessor implements MasterObserver {
+  public static class TestMaxRetriesCoprocessor implements MasterCoprocessor, MasterObserver {
     AtomicLong retryNum = new AtomicLong(0);

     public TestMaxRetriesCoprocessor() {
     }

+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
         String namespace) throws IOException {

@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;

 import java.io.IOException;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -73,7 +75,12 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {

   private static AtomicInteger MAX_CONCURRENCY = new AtomicInteger(0);

-  public static final class CountingRegionObserver implements RegionObserver {
+  public static final class CountingRegionObserver implements RegionCoprocessor, RegionObserver {

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,

@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 import java.io.IOException;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;

@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@@ -65,7 +67,11 @@ public class TestAsyncRegionLocatorTimeout {

   private static volatile long SLEEP_MS = 0L;

-  public static class SleepRegionObserver implements RegionObserver {
+  public static class SleepRegionObserver implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }

     @Override
     public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,

@@ -28,6 +28,7 @@ import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ForkJoinPool;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -202,7 +204,12 @@ public class TestAsyncTableBatch {
     assertEquals(4, Bytes.toInt(appendValue, 8));
   }

-  public static final class ErrorInjectObserver implements RegionObserver {
+  public static final class ErrorInjectObserver implements RegionCoprocessor, RegionObserver {

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -253,7 +255,12 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
     }
   }

-  public static class CompactorRegionObserver implements RegionObserver {
+  public static class CompactorRegionObserver implements RegionCoprocessor, RegionObserver {

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,

@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -1549,7 +1551,7 @@ public class TestBlockEvictionFromClient {
     }
   }

-  public static class CustomInnerRegionObserver implements RegionObserver {
+  public static class CustomInnerRegionObserver implements RegionCoprocessor, RegionObserver {
     static final AtomicLong sleepTime = new AtomicLong(0);
     static final AtomicBoolean slowDownNext = new AtomicBoolean(false);
     static final AtomicInteger countOfNext = new AtomicInteger(0);
@@ -1559,6 +1561,11 @@ public class TestBlockEvictionFromClient {
     private static final AtomicReference<CountDownLatch> cdl = new AtomicReference<>(
         new CountDownLatch(0));

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> e,
         InternalScanner s, List<Result> results, int limit, boolean hasMore) throws IOException {

@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -45,6 +46,7 @@ import java.io.InterruptedIOException;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;

 @Category({MediumTests.class, ClientTests.class})
@@ -58,7 +60,12 @@ public class TestClientOperationInterrupt {
   private static final byte[] test = Bytes.toBytes("test");
   private static Configuration conf;

-  public static class TestCoprocessor implements RegionObserver {
+  public static class TestCoprocessor implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CountDownLatch;

 import org.apache.commons.logging.Log;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -189,10 +191,15 @@ public class TestEnableTable {
     }
   }

-  public static class MasterSyncObserver implements MasterObserver {
+  public static class MasterSyncObserver implements MasterCoprocessor, MasterObserver {
     volatile CountDownLatch tableCreationLatch = null;
     volatile CountDownLatch tableDeletionLatch = null;

+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postCompletedCreateTableAction(
         final ObserverContext<MasterCoprocessorEnvironment> ctx,
@@ -222,8 +229,8 @@ public class TestEnableTable {
       throws Exception {
     // NOTE: We need a latch because admin is not sync,
     // so the postOp coprocessor method may be called after the admin operation returned.
-    MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster()
-      .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName());
+    MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster()
+      .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class);
     observer.tableCreationLatch = new CountDownLatch(1);
     Admin admin = testUtil.getAdmin();
     if (splitKeys != null) {
@@ -240,8 +247,8 @@ public class TestEnableTable {
       throws Exception {
     // NOTE: We need a latch because admin is not sync,
     // so the postOp coprocessor method may be called after the admin operation returned.
-    MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster()
-      .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName());
+    MasterSyncObserver observer = testUtil.getHBaseCluster().getMaster()
+      .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class);
     observer.tableDeletionLatch = new CountDownLatch(1);
     Admin admin = testUtil.getAdmin();
     try {

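The TestEnableTable hunks also show the reworked test-facing lookup: findCoprocessor now takes a Class and returns a typed instance, so call sites drop both the string name and the unchecked cast. Side by side (assuming 'host' is a MasterCoprocessorHost with MasterSyncObserver loaded; variable names are illustrative):

  // Before this patch: string-keyed lookup plus a cast.
  MasterSyncObserver before =
      (MasterSyncObserver) host.findCoprocessor(MasterSyncObserver.class.getName());
  // After this patch: class-keyed, typed lookup.
  MasterSyncObserver after = host.findCoprocessor(MasterSyncObserver.class);
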
@@ -40,6 +40,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.NavigableSet;
+import java.util.Optional;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
@@ -74,6 +75,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
@@ -543,7 +545,7 @@ public class TestFromClientSide {
   * This is a coprocessor to inject a test failure so that a store scanner.reseek() call will
   * fail with an IOException() on the first call.
   */
-  public static class ExceptionInReseekRegionObserver implements RegionObserver {
+  public static class ExceptionInReseekRegionObserver implements RegionCoprocessor, RegionObserver {
     static AtomicLong reqCount = new AtomicLong(0);
     static AtomicBoolean isDoNotRetry = new AtomicBoolean(false); // whether to throw DNRIOE
     static AtomicBoolean throwOnce = new AtomicBoolean(true); // whether to only throw once
@@ -554,6 +556,11 @@ public class TestFromClientSide {
       throwOnce.set(true);
     }

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     class MyStoreScanner extends StoreScanner {
       public MyStoreScanner(HStore store, ScanInfo scanInfo, Scan scan, NavigableSet<byte[]> columns,
           long readPt) throws IOException {

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -688,7 +690,7 @@ public class TestFromClientSide3 {

   private void testPreBatchMutate(TableName tableName, Runnable rn)throws Exception {
     HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addCoprocessor(WatiingForScanObserver.class.getName());
+    desc.addCoprocessor(WaitingForScanObserver.class.getName());
     desc.addFamily(new HColumnDescriptor(FAMILY));
     TEST_UTIL.getAdmin().createTable(desc);
     ExecutorService service = Executors.newFixedThreadPool(2);
@@ -720,7 +722,7 @@ public class TestFromClientSide3 {
   public void testLockLeakWithDelta() throws Exception, Throwable {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addCoprocessor(WatiingForMultiMutationsObserver.class.getName());
+    desc.addCoprocessor(WaitingForMultiMutationsObserver.class.getName());
     desc.setConfiguration("hbase.rowlock.wait.duration", String.valueOf(5000));
     desc.addFamily(new HColumnDescriptor(FAMILY));
     TEST_UTIL.getAdmin().createTable(desc);
@@ -735,7 +737,7 @@ public class TestFromClientSide3 {
     try (Table table = con.getTable(tableName)) {
       Put put = new Put(ROW);
       put.addColumn(FAMILY, QUALIFIER, VALUE);
-      // the put will be blocked by WatiingForMultiMutationsObserver.
+      // the put will be blocked by WaitingForMultiMutationsObserver.
       table.put(put);
     } catch (IOException ex) {
       throw new RuntimeException(ex);
@@ -753,7 +755,7 @@ public class TestFromClientSide3 {
     });
     appendService.shutdown();
     appendService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
-    WatiingForMultiMutationsObserver observer = find(tableName, WatiingForMultiMutationsObserver.class);
+    WaitingForMultiMutationsObserver observer = find(tableName, WaitingForMultiMutationsObserver.class);
     observer.latch.countDown();
     putService.shutdown();
     putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
@@ -774,7 +776,7 @@ public class TestFromClientSide3 {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addCoprocessor(MultiRowMutationEndpoint.class.getName());
-    desc.addCoprocessor(WatiingForMultiMutationsObserver.class.getName());
+    desc.addCoprocessor(WaitingForMultiMutationsObserver.class.getName());
     desc.setConfiguration("hbase.rowlock.wait.duration", String.valueOf(5000));
     desc.addFamily(new HColumnDescriptor(FAMILY));
     TEST_UTIL.getAdmin().createTable(desc);
@@ -793,7 +795,7 @@ public class TestFromClientSide3 {
     try (Table table = con.getTable(tableName)) {
       Put put0 = new Put(rowLocked);
       put0.addColumn(FAMILY, QUALIFIER, value0);
-      // the put will be blocked by WatiingForMultiMutationsObserver.
+      // the put will be blocked by WaitingForMultiMutationsObserver.
       table.put(put0);
     } catch (IOException ex) {
       throw new RuntimeException(ex);
@@ -830,7 +832,7 @@ public class TestFromClientSide3 {
     });
     cpService.shutdown();
     cpService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
-    WatiingForMultiMutationsObserver observer = find(tableName, WatiingForMultiMutationsObserver.class);
+    WaitingForMultiMutationsObserver observer = find(tableName, WaitingForMultiMutationsObserver.class);
     observer.latch.countDown();
     putService.shutdown();
     putService.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
@@ -975,8 +977,15 @@ public class TestFromClientSide3 {
     return clz.cast(cp);
   }

-  public static class WatiingForMultiMutationsObserver implements RegionObserver {
+  public static class WaitingForMultiMutationsObserver
+      implements RegionCoprocessor, RegionObserver {
     final CountDownLatch latch = new CountDownLatch(1);

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
         final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
@@ -988,8 +997,14 @@ public class TestFromClientSide3 {
     }
   }

-  public static class WatiingForScanObserver implements RegionObserver {
+  public static class WaitingForScanObserver implements RegionCoprocessor, RegionObserver {
     private final CountDownLatch latch = new CountDownLatch(1);

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> c,
         final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {

@@ -31,6 +31,7 @@ import java.lang.reflect.Modifier;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 import java.util.Random;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.SynchronousQueue;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
@@ -118,7 +120,7 @@ public class TestHCM {
   /**
   * This copro sleeps 20 seconds. It fails on the first call; the second time, it works.
   */
-  public static class SleepAndFailFirstTime implements RegionObserver {
+  public static class SleepAndFailFirstTime implements RegionCoprocessor, RegionObserver {
     static final AtomicLong ct = new AtomicLong(0);
     static final String SLEEP_TIME_CONF_KEY =
         "hbase.coprocessor.SleepAndFailFirstTime.sleepTime";
@@ -128,6 +130,11 @@ public class TestHCM {
     public SleepAndFailFirstTime() {
     }

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
       RegionCoprocessorEnvironment env = c.getEnvironment();
@@ -175,8 +182,14 @@ public class TestHCM {

   }

-  public static class SleepCoprocessor implements RegionObserver {
+  public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver {
     public static final int SLEEP_TIME = 5000;

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {
@@ -204,9 +217,15 @@ public class TestHCM {

   }

-  public static class SleepLongerAtFirstCoprocessor implements RegionObserver {
+  public static class SleepLongerAtFirstCoprocessor implements RegionCoprocessor, RegionObserver {
     public static final int SLEEP_TIME = 2000;
     static final AtomicLong ct = new AtomicLong(0);

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {

@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;

 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.Optional;

 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
@@ -137,7 +139,13 @@ public class TestMobCloneSnapshotFromClient extends TestCloneSnapshotFromClient
   /**
   * This coprocessor is used to delay the flush.
   */
-  public static class DelayFlushCoprocessor implements RegionObserver {
+  public static class DelayFlushCoprocessor implements RegionCoprocessor, RegionObserver {

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
       if (delayFlush) {

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.Waiter;

 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@@ -83,13 +85,18 @@ public class TestReplicaWithCluster {
   /**
   * This copro is used to synchronize the tests.
   */
-  public static class SlowMeCopro implements RegionObserver {
+  public static class SlowMeCopro implements RegionCoprocessor, RegionObserver {
     static final AtomicLong sleepTime = new AtomicLong(0);
     static final AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new CountDownLatch(0));

     public SlowMeCopro() {
     }

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {
@@ -119,11 +126,16 @@ public class TestReplicaWithCluster {
   /**
   * This copro is used to simulate region server down exception for Get and Scan
   */
-  public static class RegionServerStoppedCopro implements RegionObserver {
+  public static class RegionServerStoppedCopro implements RegionCoprocessor, RegionObserver {

     public RegionServerStoppedCopro() {
     }

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {
@@ -164,10 +176,16 @@ public class TestReplicaWithCluster {
   /**
   * This copro is used to slow down the primary meta region scan a bit
   */
-  public static class RegionServerHostingPrimayMetaRegionSlowOrStopCopro implements RegionObserver {
+  public static class RegionServerHostingPrimayMetaRegionSlowOrStopCopro
+      implements RegionCoprocessor, RegionObserver {
     static boolean slowDownPrimaryMetaScan = false;
     static boolean throwException = false;

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {

@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -98,7 +100,7 @@ public class TestReplicasClient {
   /**
   * This copro is used to synchronize the tests.
   */
-  public static class SlowMeCopro implements RegionObserver {
+  public static class SlowMeCopro implements RegionCoprocessor, RegionObserver {
     static final AtomicLong sleepTime = new AtomicLong(0);
     static final AtomicBoolean slowDownNext = new AtomicBoolean(false);
     static final AtomicInteger countOfNext = new AtomicInteger(0);
@@ -108,6 +110,11 @@ public class TestReplicasClient {
     public SlowMeCopro() {
     }

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.client;

 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Optional;
+
 import static junit.framework.TestCase.assertTrue;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -29,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -101,7 +104,11 @@ public class TestResultFromCoprocessor {
   }
   }

-  public static class MyObserver implements RegionObserver {
+  public static class MyObserver implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }

     @Override
     public Result postAppend(final ObserverContext<RegionCoprocessorEnvironment> c,

@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;

 import java.io.IOException;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;

 import org.apache.hadoop.conf.Configuration;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.ipc.ServerTooBusyException;
@@ -66,8 +68,13 @@ public class TestServerBusyException {
   @Rule
   public TestName name = new TestName();

-  public static class SleepCoprocessor implements RegionObserver {
+  public static class SleepCoprocessor implements RegionCoprocessor, RegionObserver {
     public static final int SLEEP_TIME = 5000;
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {
@@ -95,9 +102,15 @@ public class TestServerBusyException {

   }

-  public static class SleepLongerAtFirstCoprocessor implements RegionObserver {
+  public static class SleepLongerAtFirstCoprocessor implements RegionCoprocessor, RegionObserver {
     public static final int SLEEP_TIME = 2000;
     static final AtomicLong ct = new AtomicLong(0);

+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Get get, final List<Cell> results) throws IOException {

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.coprocessor;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -39,9 +40,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
  * passed-in WALEdit, i.e, ignore specified columns when writing, or add a KeyValue. On the other
  * side, it checks whether the ignored column is still in WAL when restored at region reconstruction.
  */
-public class SampleRegionWALObserver implements WALObserver, RegionObserver {
+public class SampleRegionWALCoprocessor implements WALCoprocessor, RegionCoprocessor,
+    WALObserver, RegionObserver {

-  private static final Log LOG = LogFactory.getLog(SampleRegionWALObserver.class);
+  private static final Log LOG = LogFactory.getLog(SampleRegionWALCoprocessor.class);

   private byte[] tableName;
   private byte[] row;
@@ -81,6 +83,15 @@ public class SampleRegionWALObserver implements WALObserver, RegionObserver {
     postWALRollCalled = false;
   }

+  @Override public Optional<WALObserver> getWALObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
   @Override
   public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> env,
       RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
@@ -167,13 +178,13 @@ public class SampleRegionWALObserver implements WALObserver, RegionObserver {
   }

   public boolean isPreWALRestoreCalled() {
-    LOG.debug(SampleRegionWALObserver.class.getName() +
+    LOG.debug(SampleRegionWALCoprocessor.class.getName() +
       ".isPreWALRestoreCalled is called.");
     return preWALRestoreCalled;
   }

   public boolean isPostWALRestoreCalled() {
-    LOG.debug(SampleRegionWALObserver.class.getName() +
+    LOG.debug(SampleRegionWALCoprocessor.class.getName() +
       ".isPostWALRestoreCalled is called.");
     return postWALRestoreCalled;
   }

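SampleRegionWALCoprocessor demonstrates that one class can serve several hosts at once by implementing each host's *Coprocessor interface and getter. Another consequence of composition worth noting: the returned observer does not have to be the coprocessor itself (CoprocessorII further down returns an anonymous observer). A hypothetical sketch of that decoupled form:

  import java.util.Optional;

  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionObserver;

  // Hypothetical example: observer state lives in its own object
  // instead of on the coprocessor class.
  public class ExampleDelegatingCoprocessor implements RegionCoprocessor {

    private final RegionObserver observer = new RegionObserver() {
      // override only the hooks of interest; the rest keep their defaults
    };

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(observer);
    }
  }
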
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

@@ -72,7 +73,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
 * A sample region observer that tests the RegionObserver interface.
 * It works with TestRegionObserverInterface to provide the test case.
 */
-public class SimpleRegionObserver implements RegionObserver {
+public class SimpleRegionObserver implements RegionCoprocessor, RegionObserver {

   final AtomicInteger ctBeforeDelete = new AtomicInteger(1);
   final AtomicInteger ctPreOpen = new AtomicInteger(0);
@@ -134,6 +135,11 @@ public class SimpleRegionObserver implements RegionObserver {
     throwOnPostFlush.set(val);
   }

+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    return Optional.of(this);
+  }
+
   @Override
   public void start(CoprocessorEnvironment e) throws IOException {
   }

@@ -20,13 +20,13 @@
 package org.apache.hadoop.hbase.coprocessor;

 import java.io.IOException;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;

 import static org.mockito.Mockito.*;
 import static org.junit.Assert.*;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -74,7 +74,8 @@ public class TestCoprocessorConfiguration {
   private static final AtomicBoolean systemCoprocessorLoaded = new AtomicBoolean();
   private static final AtomicBoolean tableCoprocessorLoaded = new AtomicBoolean();

-  public static class SystemCoprocessor implements Coprocessor {
+  public static class SystemCoprocessor implements MasterCoprocessor, RegionCoprocessor,
+      RegionServerCoprocessor {
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
       systemCoprocessorLoaded.set(true);
@@ -84,7 +85,7 @@ public class TestCoprocessorConfiguration {
     public void stop(CoprocessorEnvironment env) throws IOException { }
   }

-  public static class TableCoprocessor implements Coprocessor {
+  public static class TableCoprocessor implements RegionCoprocessor {
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
       tableCoprocessorLoaded.set(true);
@@ -108,7 +109,7 @@ public class TestCoprocessorConfiguration {
       systemCoprocessorLoaded.get(),
       CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED);
     assertEquals("Table coprocessors loading default was not honored",
-      tableCoprocessorLoaded.get(),
+      tableCoprocessorLoaded.get(),
       CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED &&
       CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED);
   }

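The SystemCoprocessor/TableCoprocessor rewrite above spells out a consequence of dropping plain Coprocessor loading: even a coprocessor that only wants the start/stop lifecycle must now declare which host(s) it can load on, and a system-wide one has to implement all three host interfaces. A lifecycle-only sketch (hypothetical class; start/stop signatures are the ones used throughout this patch):

  import java.io.IOException;

  import org.apache.hadoop.hbase.CoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;

  // Loads on the region host only; exposes no observer and no service.
  public class ExampleLifecycleOnlyCoprocessor implements RegionCoprocessor {

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
      // one-time initialization
    }

    @Override
    public void stop(CoprocessorEnvironment env) throws IOException {
      // cleanup
    }
  }
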
@@ -39,7 +39,7 @@ public class TestCoprocessorHost {
   /**
   * An {@link Abortable} implementation for tests.
   */
-  class TestAbortable implements Abortable {
+  private class TestAbortable implements Abortable {
     private volatile boolean aborted = false;

     @Override
@@ -56,13 +56,23 @@ public class TestCoprocessorHost {
   @Test
   public void testDoubleLoadingAndPriorityValue() {
     final Configuration conf = HBaseConfiguration.create();
-    CoprocessorHost<CoprocessorEnvironment> host =
-        new CoprocessorHost<CoprocessorEnvironment>(new TestAbortable()) {
-      final Configuration cpHostConf = conf;
+    CoprocessorHost<RegionCoprocessor, CoprocessorEnvironment<RegionCoprocessor>> host =
+        new CoprocessorHost<RegionCoprocessor, CoprocessorEnvironment<RegionCoprocessor>>(
+            new TestAbortable()) {
+      @Override
+      public RegionCoprocessor checkAndGetInstance(Class<?> implClass)
+          throws InstantiationException, IllegalAccessException {
+        if(RegionCoprocessor.class.isAssignableFrom(implClass)) {
+          return (RegionCoprocessor)implClass.newInstance();
+        }
+        return null;
+      }
+
+      final Configuration cpHostConf = conf;

       @Override
-      public CoprocessorEnvironment createEnvironment(Class<?> implClass,
-          final Coprocessor instance, final int priority, int sequence, Configuration conf) {
+      public CoprocessorEnvironment createEnvironment(final RegionCoprocessor instance,
+          final int priority, int sequence, Configuration conf) {
         return new CoprocessorEnvironment() {
           final Coprocessor envInstance = instance;

@@ -106,6 +116,12 @@ public class TestCoprocessorHost {
         return null;
       }

+      @Override
+      public void startup() throws IOException {}
+
+      @Override
+      public void shutdown() {}
+
       @Override
       public ClassLoader getClassLoader() {
         return null;
@@ -116,13 +132,16 @@ public class TestCoprocessorHost {
     final String key = "KEY";
     final String coprocessor = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver";
     // Try and load a coprocessor three times
-    conf.setStrings(key, coprocessor, coprocessor, coprocessor, SimpleRegionObserverV2.class.getName());
+    conf.setStrings(key, coprocessor, coprocessor, coprocessor,
+        SimpleRegionObserverV2.class.getName());
     host.loadSystemCoprocessors(conf, key);
     // Two coprocessors(SimpleRegionObserver and SimpleRegionObserverV2) loaded
-    Assert.assertEquals(2, host.coprocessors.size());
+    Assert.assertEquals(2, host.coprocEnvironments.size());
     // Check the priority value
-    CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment(SimpleRegionObserver.class.getName());
-    CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName());
+    CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment(
+        SimpleRegionObserver.class.getName());
+    CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment(
+        SimpleRegionObserverV2.class.getName());
     assertNotNull(simpleEnv);
     assertNotNull(simpleEnv_v2);
     assertEquals(Coprocessor.PRIORITY_SYSTEM, simpleEnv.getPriority());

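This test makes the host's new generic contract explicit: CoprocessorHost is parameterized over the coprocessor type and its environment, and checkAndGetInstance(Class) is where each host decides whether a loaded class belongs to it. The decision point in isolation (shaped like the anonymous host in the test above; method body is a sketch):

  // A host accepts only its own coprocessor type; anything else, including an
  // old Observer-only class, falls through to null and is not loaded.
  public RegionCoprocessor checkAndGetInstance(Class<?> implClass)
      throws InstantiationException, IllegalAccessException {
    if (RegionCoprocessor.class.isAssignableFrom(implClass)) {
      return (RegionCoprocessor) implClass.newInstance();
    }
    return null;
  }

This null path is what enforces the compatibility story in the commit message: observer-only and marker-only classes simply fail to load rather than being silently adapted.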
@@ -32,6 +32,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;

@@ -62,7 +63,6 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Rule;

@@ -149,7 +149,7 @@ public class TestCoprocessorInterface {
     }
   }
 
-  public static class CoprocessorImpl implements RegionObserver {
+  public static class CoprocessorImpl implements RegionCoprocessor, RegionObserver {
 
     private boolean startCalled;
     private boolean stopCalled;

@@ -177,6 +177,11 @@ public class TestCoprocessorInterface {
       stopCalled = true;
     }
 
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
       preOpenCalled = true;

@@ -242,23 +247,31 @@ public class TestCoprocessorInterface {
     }
   }
 
-  public static class CoprocessorII implements RegionObserver {
+  public static class CoprocessorII implements RegionCoprocessor {
     private ConcurrentMap<String, Object> sharedData;
 
     @Override
     public void start(CoprocessorEnvironment e) {
       sharedData = ((RegionCoprocessorEnvironment)e).getSharedData();
       sharedData.putIfAbsent("test2", new Object());
     }
 
     @Override
     public void stop(CoprocessorEnvironment e) {
       sharedData = null;
     }
 
     @Override
-    public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
-        final Get get, final List<Cell> results) throws IOException {
-      if (1/0 == 1) {
-        e.complete();
-      }
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(new RegionObserver() {
+        @Override
+        public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
+            final Get get, final List<Cell> results) throws IOException {
+          if (1/0 == 1) {
+            e.complete();
+          }
+        }
+      });
     }
 
     Map<String, Object> getSharedData() {

@@ -272,8 +285,7 @@ public class TestCoprocessorInterface {
     byte [][] families = { fam1, fam2, fam3 };
 
     Configuration hc = initConfig();
-    Region region = initHRegion(tableName, name.getMethodName(), hc,
-        new Class<?>[]{}, families);
+    Region region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[]{}, families);
 
     for (int i = 0; i < 3; i++) {
       HBaseTestCase.addContent(region, fam3);

@@ -284,18 +296,16 @@ public class TestCoprocessorInterface {
 
     region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
 
-    Coprocessor c = region.getCoprocessorHost().
-        findCoprocessor(CoprocessorImpl.class.getName());
-    Coprocessor c2 = region.getCoprocessorHost().
-        findCoprocessor(CoprocessorII.class.getName());
+    Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
+    Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
     Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
     Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
     assertNotNull(o);
     assertNotNull(o2);
     // to coprocessors get different sharedDatas
     assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
-    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName());
-    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName());
+    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
+    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
     // make sure that all coprocessor of a class have identical sharedDatas
     assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
     assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);

@@ -312,21 +322,18 @@ public class TestCoprocessorInterface {
       fail();
     } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
     }
-    assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()));
-    c = region.getCoprocessorHost().
-        findCoprocessor(CoprocessorImpl.class.getName());
+    assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
+    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
     assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
     c = c2 = null;
     // perform a GC
     System.gc();
     // reopen the region
     region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
-    c = region.getCoprocessorHost().
-        findCoprocessor(CoprocessorImpl.class.getName());
+    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
     // CPimpl is unaffected, still the same reference
     assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
-    c2 = region.getCoprocessorHost().
-        findCoprocessor(CoprocessorII.class.getName());
+    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
     // new map and object created, hence the reference is different
     // hence the old entry was indeed removed by the GC and new one has been created
     Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");

@@ -357,8 +364,7 @@ public class TestCoprocessorInterface {
     scanner.next(new ArrayList<>());
 
     HBaseTestingUtility.closeRegionAndWAL(region);
-    Coprocessor c = region.getCoprocessorHost().
-        findCoprocessor(CoprocessorImpl.class.getName());
+    Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
 
     assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
     assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());

@@ -382,7 +388,7 @@ public class TestCoprocessorInterface {
     ((HRegion)r).setCoprocessorHost(host);
 
     for (Class<?> implClass : implClasses) {
-      host.load(implClass, Coprocessor.PRIORITY_USER, conf);
+      host.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
     }
     // we need to manually call pre- and postOpen here since the
     // above load() is not the real case for CP loading. A CP is

@@ -412,7 +418,7 @@ public class TestCoprocessorInterface {
     ((HRegion)r).setCoprocessorHost(host);
 
     for (Class<?> implClass : implClasses) {
-      host.load(implClass, Coprocessor.PRIORITY_USER, conf);
+      host.load((Class<? extends RegionCoprocessor>) implClass, Coprocessor.PRIORITY_USER, conf);
       Coprocessor c = host.findCoprocessor(implClass.getName());
       assertNotNull(c);
     }

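CoprocessorImpl and CoprocessorII above show the two ways a coprocessor can hand out its observer under the composition model. A condensed sketch of both styles, assuming the RegionCoprocessor and RegionObserver interfaces from this patch (class names are illustrative):

    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    // Style 1 (CoprocessorImpl): the coprocessor is its own observer.
    class SelfObservingCoprocessor implements RegionCoprocessor, RegionObserver {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
      }
    }

    // Style 2 (CoprocessorII): the coprocessor keeps lifecycle and shared
    // state, and delegates observation to a separate, here anonymous, observer.
    class DelegatingCoprocessor implements RegionCoprocessor {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(new RegionObserver() {
          // override only the hooks this coprocessor cares about
        });
      }
    }

Style 2 is what lets CoprocessorII keep its sharedData handling in start()/stop() while the observer stays a throwaway object.
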
@@ -95,7 +95,7 @@ public class TestCoprocessorMetrics {
   /**
    * MasterObserver that has a Timer metric for create table operation.
    */
-  public static class CustomMasterObserver implements MasterObserver {
+  public static class CustomMasterObserver implements MasterCoprocessor, MasterObserver {
     private Timer createTableTimer;
     private long start = Long.MIN_VALUE;
 

@@ -125,14 +125,25 @@ public class TestCoprocessorMetrics {
         createTableTimer = registry.timer("CreateTable");
       }
     }
+
+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
   }
 
   /**
    * RegionServerObserver that has a Counter for rollWAL requests.
    */
-  public static class CustomRegionServerObserver implements RegionServerObserver {
+  public static class CustomRegionServerObserver implements RegionServerCoprocessor,
+      RegionServerObserver {
     /** This is the Counter metric object to keep track of the current count across invocations */
     private Counter rollWALCounter;
 
+    @Override public Optional<RegionServerObserver> getRegionServerObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
         throws IOException {

@@ -156,7 +167,7 @@ public class TestCoprocessorMetrics {
   /**
    * WALObserver that has a Counter for walEdits written.
    */
-  public static class CustomWALObserver implements WALObserver {
+  public static class CustomWALObserver implements WALCoprocessor, WALObserver {
     private Counter walEditsCount;
 
     @Override

@@ -177,12 +188,16 @@ public class TestCoprocessorMetrics {
         }
       }
     }
+
+    @Override public Optional<WALObserver> getWALObserver() {
+      return Optional.of(this);
+    }
   }
 
   /**
    * RegionObserver that has a Counter for preGet()
    */
-  public static class CustomRegionObserver implements RegionObserver {
+  public static class CustomRegionObserver implements RegionCoprocessor, RegionObserver {
     private Counter preGetCounter;
 
     @Override

@@ -191,6 +206,11 @@ public class TestCoprocessorMetrics {
       preGetCounter.increment();
     }
 
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
       if (env instanceof RegionCoprocessorEnvironment) {

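Each get*Observer() accessor added above returns an Optional, so a host can gather observers without instanceof checks and skip coprocessors that observe nothing on that host type. This is not the actual host internals, just a sketch of the consuming side for the region case:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    class ObserverCollectionSketch {
      static List<RegionObserver> collectRegionObservers(List<RegionCoprocessor> loaded) {
        List<RegionObserver> observers = new ArrayList<>();
        for (RegionCoprocessor cp : loaded) {
          // Optional.empty() means "loaded, but observes nothing here".
          cp.getRegionObserver().ifPresent(observers::add);
        }
        return observers;
      }
    }
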
@@ -49,7 +49,7 @@ public class TestCoprocessorStop {
   private static final String REGIONSERVER_FILE =
       "regionserver" + System.currentTimeMillis();
 
-  public static class FooCoprocessor implements Coprocessor {
+  public static class FooCoprocessor implements MasterCoprocessor, RegionServerCoprocessor {
     @Override
     public void start(CoprocessorEnvironment env) throws IOException {
       String where = null;

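FooCoprocessor above also shows that one class can now serve several hosts just by implementing several *Coprocessor interfaces. A stripped-down sketch, assuming the base Coprocessor interface supplies default start()/stop() so nothing else must be overridden:

    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;

    // Loadable on the master and on a region server; with no get*Observer()
    // override it exposes no observer, so only lifecycle callbacks run.
    class DualHostCoprocessor implements MasterCoprocessor, RegionServerCoprocessor {
    }
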
@@ -29,6 +29,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Coprocessor;

@@ -87,7 +88,11 @@ public class TestHTableWrapper {
   private static final byte[] bytes4 = Bytes.toBytes(4);
   private static final byte[] bytes5 = Bytes.toBytes(5);
 
-  static class DummyRegionObserver implements RegionObserver {
+  public static class DummyRegionObserver implements MasterCoprocessor, MasterObserver {
+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
   }
 
   private Table hTableInterface;

@@ -135,14 +140,14 @@ public class TestHTableWrapper {
   public void testHTableInterfaceMethods() throws Exception {
     Configuration conf = util.getConfiguration();
     MasterCoprocessorHost cpHost = util.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
-    Class<?> implClazz = DummyRegionObserver.class;
+    Class<? extends MasterCoprocessor> implClazz = DummyRegionObserver.class;
     cpHost.load(implClazz, Coprocessor.PRIORITY_HIGHEST, conf);
     CoprocessorEnvironment env = cpHost.findCoprocessorEnvironment(implClazz.getName());
     assertEquals(Coprocessor.VERSION, env.getVersion());
     assertEquals(VersionInfo.getVersion(), env.getHBaseVersion());
     hTableInterface = env.getTable(TEST_TABLE);
     checkHTableInterfaceMethods();
-    cpHost.shutdown(env);
+    cpHost.shutdown((MasterCoprocessorEnvironment) env);
   }
 
   private void checkHTableInterfaceMethods() throws Exception {

@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;

@@ -97,12 +98,17 @@ public class TestMasterCoprocessorExceptionWithAbort {
     }
   }
 
-  public static class BuggyMasterObserver implements MasterObserver {
+  public static class BuggyMasterObserver implements MasterCoprocessor, MasterObserver {
     private boolean preCreateTableCalled;
     private boolean postCreateTableCalled;
     private boolean startCalled;
     private boolean postStartMasterCalled;
 
+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> env,
         TableDescriptor desc, RegionInfo[] regions) throws IOException {

@@ -163,8 +169,7 @@ public class TestMasterCoprocessorExceptionWithAbort {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    BuggyMasterObserver cp = (BuggyMasterObserver)host.findCoprocessor(
-        BuggyMasterObserver.class.getName());
+    BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
     assertFalse("No table created yet", cp.wasCreateTableCalled());
 
     // set a watch on the zookeeper /hbase/master node. If the master dies,

@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;

@@ -73,12 +74,17 @@ public class TestMasterCoprocessorExceptionWithRemove {
     }
   }
 
-  public static class BuggyMasterObserver implements MasterObserver {
+  public static class BuggyMasterObserver implements MasterCoprocessor, MasterObserver {
     private boolean preCreateTableCalled;
     private boolean postCreateTableCalled;
     private boolean startCalled;
     private boolean postStartMasterCalled;
 
+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
     @SuppressWarnings("null")
     @Override
     public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> env,

@@ -144,8 +150,7 @@ public class TestMasterCoprocessorExceptionWithRemove {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    BuggyMasterObserver cp = (BuggyMasterObserver)host.findCoprocessor(
-        BuggyMasterObserver.class.getName());
+    BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
     assertFalse("No table created yet", cp.wasCreateTableCalled());
 
     // Set a watch on the zookeeper /hbase/master node. If the master dies,

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 

@@ -95,7 +96,7 @@ public class TestMasterObserver {
   public static CountDownLatch tableCreationLatch = new CountDownLatch(1);
   public static CountDownLatch tableDeletionLatch = new CountDownLatch(1);
 
-  public static class CPMasterObserver implements MasterObserver {
+  public static class CPMasterObserver implements MasterCoprocessor, MasterObserver {
 
     private boolean bypass = false;
     private boolean preCreateTableCalled;

@@ -282,6 +283,11 @@ public class TestMasterObserver {
       postLockHeartbeatCalled = false;
     }
 
+    @Override
+    public Optional<MasterObserver> getMasterObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preMergeRegions(
         final ObserverContext<MasterCoprocessorEnvironment> ctx,

@@ -1503,8 +1509,7 @@ public class TestMasterObserver {
     assertTrue("Master should be active", master.isActiveMaster());
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
     assertNotNull("CoprocessorHost should not be null", host);
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     assertNotNull("CPMasterObserver coprocessor not found or not installed!", cp);
 
     // check basic lifecycle

@@ -1521,8 +1526,7 @@ public class TestMasterObserver {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.enableBypass(true);
     cp.resetStates();
     assertFalse("No table created yet", cp.wasCreateTableCalled());

@@ -1698,8 +1702,7 @@ public class TestMasterObserver {
     MiniHBaseCluster cluster = UTIL.getHBaseCluster();
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     // create a table

@@ -1760,8 +1763,7 @@ public class TestMasterObserver {
     String testNamespace = "observed_ns";
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
 
     cp.enableBypass(false);
     cp.resetStates();

@@ -1866,8 +1868,7 @@ public class TestMasterObserver {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.enableBypass(false);
     cp.resetStates();
 

@@ -1955,8 +1956,7 @@ public class TestMasterObserver {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     GetTableDescriptorsRequest req =

@@ -1973,8 +1973,7 @@ public class TestMasterObserver {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     master.getMasterRpcServices().getTableNames(null,

@@ -1989,8 +1988,7 @@ public class TestMasterObserver {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     master.abortProcedure(1, true);

@@ -2005,8 +2003,7 @@ public class TestMasterObserver {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     master.getProcedures();

@@ -2021,8 +2018,7 @@ public class TestMasterObserver {
 
     HMaster master = cluster.getMaster();
     MasterCoprocessorHost host = master.getMasterCoprocessorHost();
-    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     master.getLocks();

@@ -2043,8 +2039,7 @@ public class TestMasterObserver {
   @Test
   public void testQueueLockAndLockHeartbeatOperations() throws Exception {
     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
-    CPMasterObserver cp = (CPMasterObserver)master.getMasterCoprocessorHost().findCoprocessor(
-        CPMasterObserver.class.getName());
+    CPMasterObserver cp = master.getMasterCoprocessorHost().findCoprocessor(CPMasterObserver.class);
     cp.resetStates();
 
     final TableName tableName = TableName.valueOf("testLockedTable");

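The change repeated through this file swaps the String-keyed lookup plus cast for a Class-keyed, generically typed lookup. A small sketch of the difference, assuming CPMasterObserver (the nested test class above) is loaded on the given host; the helper class is illustrative:

    import org.apache.hadoop.hbase.coprocessor.TestMasterObserver.CPMasterObserver;
    import org.apache.hadoop.hbase.master.MasterCoprocessorHost;

    class TypedLookupSketch {
      static CPMasterObserver lookup(MasterCoprocessorHost host) {
        // Before: CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(
        //     CPMasterObserver.class.getName());
        // After: the Class-keyed overload returns the concrete type, no cast.
        return host.findCoprocessor(CPMasterObserver.class);
      }
    }
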
@@ -41,6 +41,7 @@ import org.junit.experimental.categories.Category;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Optional;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;

@@ -63,7 +64,12 @@ public class TestOpenTableInCoprocessor {
   /**
    * Custom coprocessor that just copies the write to another table.
    */
-  public static class SendToOtherTableCoprocessor implements RegionObserver {
+  public static class SendToOtherTableCoprocessor implements RegionCoprocessor, RegionObserver {
+
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
 
     @Override
     public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,

@@ -80,7 +86,7 @@ public class TestOpenTableInCoprocessor {
   /**
    * Coprocessor that creates an HTable with a pool to write to another table
    */
-  public static class CustomThreadPoolCoprocessor implements RegionObserver {
+  public static class CustomThreadPoolCoprocessor implements RegionCoprocessor, RegionObserver {
 
     /**
      * Get a pool that has only ever one thread. A second action added to the pool (running

@@ -97,6 +103,11 @@ public class TestOpenTableInCoprocessor {
       return pool;
     }
 
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
         final WALEdit edit, final Durability durability) throws IOException {

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;

@@ -204,7 +205,12 @@ public class TestRegionObserverBypass {
     t.delete(d);
   }
 
-  public static class TestCoprocessor implements RegionObserver {
+  public static class TestCoprocessor implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
         final Put put, final WALEdit edit, final Durability durability)

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.coprocessor;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -194,7 +195,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors {
     }
   }
 
-  public static class TestMultiMutationCoprocessor implements RegionObserver {
+  public static class TestMultiMutationCoprocessor implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
         MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {

@@ -211,7 +217,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors {
     }
   }
 
-  public static class TestDeleteCellCoprocessor implements RegionObserver {
+  public static class TestDeleteCellCoprocessor implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
         MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {

@@ -230,7 +241,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors {
     }
   }
 
-  public static class TestDeleteFamilyCoprocessor implements RegionObserver {
+  public static class TestDeleteFamilyCoprocessor implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
         MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {

@@ -249,7 +265,12 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors {
     }
   }
 
-  public static class TestDeleteRowCoprocessor implements RegionObserver {
+  public static class TestDeleteRowCoprocessor implements RegionCoprocessor, RegionObserver {
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
         MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {

@@ -268,8 +289,14 @@ public class TestRegionObserverForAddingMutationsFromCoprocessors {
     }
   }
 
-  public static class TestWALObserver implements WALObserver {
+  public static class TestWALObserver implements WALCoprocessor, WALObserver {
     static WALEdit savedEdit = null;
 
+    @Override
+    public Optional<WALObserver> getWALObserver() {
+      return Optional.of(this);
+    }
+
     @Override
     public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
         RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {

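TestWALObserver above rounds out the set: the same has-a pattern repeats per host type, with getWALObserver() for the WAL host. The minimal shape, for symmetry (class name illustrative):

    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.WALObserver;

    class MinimalWALCoprocessor implements WALCoprocessor, WALObserver {
      @Override
      public Optional<WALObserver> getWALObserver() {
        return Optional.of(this);
      }
    }
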
Some files were not shown because too many files have changed in this diff.