HBASE-18793 Remove deprecated methods in RegionObserver
parent a797fe0daa
commit a977055ecd
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.zookeeper.KeeperException;
@@ -187,8 +188,9 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
   }
 
   @Override
-  public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
+  public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+      Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
+      throws IOException {
     ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
     if (scanInfo == null) {
       // take default action
@@ -199,10 +201,9 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
   }
 
   @Override
-  public InternalScanner preCompactScannerOpen(
-      final ObserverContext<RegionCoprocessorEnvironment> c,
+  public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
       Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s) throws IOException {
+      InternalScanner s, CompactionRequest request, long readPoint) throws IOException {
     ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
     if (scanInfo == null) {
       // take default action
@@ -213,9 +214,9 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
   }
 
   @Override
-  public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
-      final KeyValueScanner s) throws IOException {
+  public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+      Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPoint)
+      throws IOException {
     ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
     if (scanInfo == null) {
       // take default action
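Taken together, these three ZooKeeperScanPolicyObserver hooks now receive the MVCC read point directly (and preCompactScannerOpen also receives the triggering CompactionRequest). A minimal sketch of an observer written against the new preFlushScannerOpen signature; the class name and pass-through behavior are illustrative only, not part of this patch:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.Store;

public class ReadPointAwareObserver implements RegionObserver {
  @Override
  public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
      Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
      throws IOException {
    // readPoint is the MVCC read point the flush will use; an observer that
    // builds its own scanner should honor it rather than choosing its own.
    return s; // returning the passed-in scanner keeps the default behavior
  }
}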
[File diff suppressed because it is too large]
@@ -19,6 +19,9 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -37,8 +40,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -73,17 +76,15 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import com.google.protobuf.Message;
-import com.google.protobuf.Service;
 
 /**
  * Implements the coprocessor environment and runtime support for coprocessors
@@ -96,8 +97,8 @@ public class RegionCoprocessorHost
 
   private static final Log LOG = LogFactory.getLog(RegionCoprocessorHost.class);
   // The shared data map
-  private static ReferenceMap sharedDataMap =
-      new ReferenceMap(AbstractReferenceMap.ReferenceStrength.HARD,
+  private static final ReferenceMap<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP =
+      new ReferenceMap<>(AbstractReferenceMap.ReferenceStrength.HARD,
           AbstractReferenceMap.ReferenceStrength.WEAK);
 
   // optimization: no need to call postScannerFilterRow, if no coprocessor implements it
@@ -401,14 +402,11 @@ public class RegionCoprocessorHost
     }
     ConcurrentMap<String, Object> classData;
     // make sure only one thread can add maps
-    synchronized (sharedDataMap) {
+    synchronized (SHARED_DATA_MAP) {
       // as long as at least one RegionEnvironment holds on to its classData it will
       // remain in this map
-      classData = (ConcurrentMap<String, Object>)sharedDataMap.get(implClass.getName());
-      if (classData == null) {
-        classData = new ConcurrentHashMap<>();
-        sharedDataMap.put(implClass.getName(), classData);
-      }
+      classData =
+          SHARED_DATA_MAP.computeIfAbsent(implClass.getName(), k -> new ConcurrentHashMap<>());
     }
     return new RegionEnvironment(instance, priority, seq, conf, region,
         rsServices, classData);
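The get/check/put sequence on the shared-data map collapses into a single Map.computeIfAbsent call. A stand-alone sketch of the same pattern; a plain HashMap stands in for the commons-collections ReferenceMap here, and since neither is thread-safe on its own the enclosing synchronized block is still required:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class SharedDataMapSketch {
  // HashMap as a stand-in for ReferenceMap; both need external locking.
  private static final Map<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP =
      new HashMap<>();

  static ConcurrentMap<String, Object> dataFor(String className) {
    synchronized (SHARED_DATA_MAP) {
      // one call replaces get(), the null check, and put()
      return SHARED_DATA_MAP.computeIfAbsent(className, k -> new ConcurrentHashMap<>());
    }
  }
}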
@@ -672,136 +670,7 @@ public class RegionCoprocessorHost
     });
   }
 
-  /**
-   * Invoked just before a split
-   * @throws IOException
-   */
-  @Deprecated
-  public void preSplit(final User user) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preSplit(ctx);
-      }
-    });
-  }
-
-  /**
-   * Invoked just before a split
-   * @throws IOException
-   *
-   * Note: the logic moves to Master; it is unused in RS
-   */
-  @Deprecated
-  public void preSplit(final byte[] splitRow, final User user) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preSplit(ctx, splitRow);
-      }
-    });
-  }
-
-  /**
-   * Invoked just after a split
-   * @param l the new left-hand daughter region
-   * @param r the new right-hand daughter region
-   * @throws IOException
-   *
-   * Note: the logic moves to Master; it is unused in RS
-   */
-  @Deprecated
-  public void postSplit(final Region l, final Region r, final User user) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.postSplit(ctx, l, r);
-      }
-    });
-  }
-
-  /**
-   * Note: the logic moves to Master; it is unused in RS
-   */
-  @Deprecated
-  public boolean preSplitBeforePONR(final byte[] splitKey,
-      final List<Mutation> metaEntries, final User user) throws IOException {
-    return execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preSplitBeforePONR(ctx, splitKey, metaEntries);
-      }
-    });
-  }
-
-  /**
-   * Note: the logic moves to Master; it is unused in RS
-   */
-  @Deprecated
-  public void preSplitAfterPONR(final User user) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preSplitAfterPONR(ctx);
-      }
-    });
-  }
-
-  /**
-   * Invoked just before the rollback of a failed split is started
-   * @throws IOException
-   *
-   * Note: the logic moves to Master; it is unused in RS
-   */
-  @Deprecated
-  public void preRollBackSplit(final User user) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preRollBackSplit(ctx);
-      }
-    });
-  }
-
-  /**
-   * Invoked just after the rollback of a failed split is done
-   * @throws IOException
-   *
-   * Note: the logic moves to Master; it is unused in RS
-   */
-  @Deprecated
-  public void postRollBackSplit(final User user) throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation(user) {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.postRollBackSplit(ctx);
-      }
-    });
-  }
-
-  /**
-   * Invoked after a split is completed irrespective of a failure or success.
-   * @throws IOException
-   */
-  public void postCompleteSplit() throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
-      @Override
-      public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.postCompleteSplit(ctx);
-      }
-    });
-  }
-
   // RegionObserver support
 
   /**
    * @param get the Get request
    * @return true if default processing should be bypassed
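Every split-related host method removed above carried the note that split logic moved to the Master, so RegionObserver can no longer see splits from the region server. A hedged sketch of the master-side alternative; it assumes the MasterObserver hook named preSplitRegion from this era of the codebase, which this diff does not itself show:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class SplitWatchingMasterObserver implements MasterObserver {
  @Override
  public void preSplitRegion(ObserverContext<MasterCoprocessorEnvironment> c,
      TableName tableName, byte[] splitRow) throws IOException {
    // splits are now observed on the Master, not the RegionServer
  }
}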
@@ -1272,8 +1141,7 @@ public class RegionCoprocessorHost
 
   /**
    * See
-   * {@link RegionObserver#preStoreScannerOpen(ObserverContext,
-   * Store, Scan, NavigableSet, KeyValueScanner)}
+   * {@link RegionObserver#preStoreScannerOpen(ObserverContext, Store, Scan, NavigableSet, KeyValueScanner, long)}
    */
   public KeyValueScanner preStoreScannerOpen(final Store store, final Scan scan,
       final NavigableSet<byte[]> targetCols, final long readPt) throws IOException {
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -1530,9 +1531,8 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
   }
 
   @Override
-  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, final InternalScanner scanner, final ScanType scanType)
-          throws IOException {
+  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      InternalScanner scanner, ScanType scanType, CompactionRequest request) throws IOException {
     requirePermission(getActiveUser(c), "compact", getTableName(c.getEnvironment()), null, null,
       Action.ADMIN, Action.CREATE);
     return scanner;
@@ -1895,30 +1895,6 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
     return result;
   }
 
-  @Override
-  public long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final byte [] row, final byte [] family, final byte [] qualifier,
-      final long amount, final boolean writeToWAL)
-      throws IOException {
-    // Require WRITE permission to the table, CF, and the KV to be replaced by the
-    // incremented value
-    RegionCoprocessorEnvironment env = c.getEnvironment();
-    Map<byte[],? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
-    User user = getActiveUser(c);
-    AuthResult authResult = permissionGranted(OpType.INCREMENT_COLUMN_VALUE, user, env, families,
-      Action.WRITE);
-    if (!authResult.isAllowed() && cellFeaturesEnabled && !compatibleEarlyTermination) {
-      authResult.setAllowed(checkCoveringPermission(user, OpType.INCREMENT_COLUMN_VALUE, env, row,
-        families, HConstants.LATEST_TIMESTAMP, Action.WRITE));
-      authResult.setReason("Covering cell set");
-    }
-    logResult(authResult);
-    if (authorizationEnabled && !authResult.isAllowed()) {
-      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
-    }
-    return -1;
-  }
-
   @Override
   public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
       throws IOException {
@@ -255,15 +255,8 @@ public class TestAvoidCellReferencesIntoShippedBlocks {
   public static class CompactorRegionObserver implements RegionObserver {
     @Override
     public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
-        long earliestPutTs, InternalScanner s) throws IOException {
-      return createCompactorScanner(store, scanners, scanType, earliestPutTs);
-    }
-
-    @Override
-    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
-        long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
+        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
+        InternalScanner s, CompactionRequest request, long readPoint) throws IOException {
       return createCompactorScanner(store, scanners, scanType, earliestPutTs);
     }
 
@@ -19,6 +19,18 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -43,30 +55,19 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.Leases;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
 
 /**
  * A sample region observer that tests the RegionObserver interface.
@@ -82,8 +83,6 @@ public class SimpleRegionObserver implements RegionObserver {
   final AtomicInteger ctPreFlush = new AtomicInteger(0);
   final AtomicInteger ctPreFlushScannerOpen = new AtomicInteger(0);
   final AtomicInteger ctPostFlush = new AtomicInteger(0);
-  final AtomicInteger ctPreSplit = new AtomicInteger(0);
-  final AtomicInteger ctPostSplit = new AtomicInteger(0);
   final AtomicInteger ctPreCompactSelect = new AtomicInteger(0);
   final AtomicInteger ctPostCompactSelect = new AtomicInteger(0);
   final AtomicInteger ctPreCompactScanner = new AtomicInteger(0);
@@ -124,8 +123,6 @@ public class SimpleRegionObserver implements RegionObserver {
   final AtomicInteger ctPostReplayWALs = new AtomicInteger(0);
   final AtomicInteger ctPreWALRestore = new AtomicInteger(0);
   final AtomicInteger ctPostWALRestore = new AtomicInteger(0);
-  final AtomicInteger ctPreSplitBeforePONR = new AtomicInteger(0);
-  final AtomicInteger ctPreSplitAfterPONR = new AtomicInteger(0);
   final AtomicInteger ctPreStoreFileReaderOpen = new AtomicInteger(0);
   final AtomicInteger ctPostStoreFileReaderOpen = new AtomicInteger(0);
   final AtomicInteger ctPostBatchMutateIndispensably = new AtomicInteger(0);
@@ -184,10 +181,11 @@ public class SimpleRegionObserver implements RegionObserver {
   }
 
   @Override
-  public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
+  public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+      Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
+      throws IOException {
     ctPreFlushScannerOpen.incrementAndGet();
-    return null;
+    return s;
   }
 
   @Override
@@ -204,63 +202,36 @@ public class SimpleRegionObserver implements RegionObserver {
   }
 
-  @Override
-  public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c) {
-    ctPreSplit.incrementAndGet();
-  }
-
-  @Override
-  public void preSplitBeforePONR(
-      ObserverContext<RegionCoprocessorEnvironment> ctx, byte[] splitKey,
-      List<Mutation> metaEntries) throws IOException {
-    ctPreSplitBeforePONR.incrementAndGet();
-  }
-
-  @Override
-  public void preSplitAfterPONR(
-      ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
-    ctPreSplitAfterPONR.incrementAndGet();
-  }
-
-  @Override
-  public void postSplit(ObserverContext<RegionCoprocessorEnvironment> c, Region l, Region r) {
-    ctPostSplit.incrementAndGet();
-  }
-
-  public boolean wasSplit() {
-    return ctPreSplit.get() > 0 && ctPostSplit.get() > 0;
-  }
-
   @Override
-  public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<StoreFile> candidates) {
+  public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      List<StoreFile> candidates, CompactionRequest request) throws IOException {
     ctPreCompactSelect.incrementAndGet();
   }
 
   @Override
-  public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, ImmutableList<StoreFile> selected) {
+  public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      ImmutableList<StoreFile> selected, CompactionRequest request) {
     ctPostCompactSelect.incrementAndGet();
   }
 
-
   @Override
-  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      Store store, InternalScanner scanner, ScanType scanType) {
+  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      InternalScanner scanner, ScanType scanType, CompactionRequest request) throws IOException {
     ctPreCompact.incrementAndGet();
     return scanner;
   }
 
   @Override
-  public InternalScanner preCompactScannerOpen(
-      final ObserverContext<RegionCoprocessorEnvironment> c,
+  public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
       Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s) throws IOException {
+      InternalScanner s, CompactionRequest request, long readPoint) throws IOException {
     ctPreCompactScanner.incrementAndGet();
-    return null;
+    return s;
   }
 
   @Override
-  public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      Store store, StoreFile resultFile) {
+  public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      StoreFile resultFile, CompactionRequest request) throws IOException {
     ctPostCompact.incrementAndGet();
   }
 
@@ -277,11 +248,11 @@ public class SimpleRegionObserver implements RegionObserver {
   }
 
   @Override
-  public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
-      final KeyValueScanner s) throws IOException {
+  public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+      Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPt)
+      throws IOException {
     ctPreStoreScannerOpen.incrementAndGet();
-    return null;
+    return s;
  }
 
   @Override
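Note the recurring return-value change in this test observer: hooks that used to return null now return the scanner they were handed. The s argument carries the result of any earlier coprocessor in the chain, so returning it unchanged makes the observer neutral, while the old return null silently discarded it. In sketch form (signature as in the diff above, comment ours):

  // Neutral pre-open hook: pass s through and the region server proceeds
  // as if this observer were absent; return your own scanner to override.
  @Override
  public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
      Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPt)
      throws IOException {
    return s;
  }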
@@ -864,22 +835,6 @@ public class SimpleRegionObserver implements RegionObserver {
     return ctPostFlush.get();
   }
 
-  public int getCtPreSplit() {
-    return ctPreSplit.get();
-  }
-
-  public int getCtPreSplitBeforePONR() {
-    return ctPreSplitBeforePONR.get();
-  }
-
-  public int getCtPreSplitAfterPONR() {
-    return ctPreSplitAfterPONR.get();
-  }
-
-  public int getCtPostSplit() {
-    return ctPostSplit.get();
-  }
-
   public int getCtPreCompactSelect() {
     return ctPreCompactSelect.get();
   }
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Rule;
@@ -193,13 +194,13 @@ public class TestCoprocessorInterface {
   }
   @Override
   public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      Store store, InternalScanner scanner, ScanType scanType) {
+      Store store, InternalScanner scanner, ScanType scanType, CompactionRequest request) {
     preCompactCalled = true;
     return scanner;
   }
   @Override
   public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      Store store, StoreFile resultFile) {
+      Store store, StoreFile resultFile, CompactionRequest request) {
     postCompactCalled = true;
   }
   @Override
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
@@ -416,7 +417,7 @@ public class TestRegionObserverInterface {
 
     @Override
     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
-        final InternalScanner scanner, final ScanType scanType) {
+        final InternalScanner scanner, final ScanType scanType, CompactionRequest request) {
       return new InternalScanner() {
         @Override
         public boolean next(List<Cell> results) throws IOException {
@@ -455,7 +456,7 @@ public class TestRegionObserverInterface {
 
     @Override
     public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
-        StoreFile resultFile) {
+        StoreFile resultFile, CompactionRequest request) {
       lastCompaction = EnvironmentEdgeManager.currentTime();
     }
 
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
@@ -139,7 +140,8 @@ public class TestRegionObserverScannerOpenHook {
 
     @Override
     public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-        Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
+        Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
+        throws IOException {
       scanners.forEach(KeyValueScanner::close);
       return NO_DATA;
     }
@@ -153,7 +155,8 @@ public class TestRegionObserverScannerOpenHook {
     @Override
     public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
         Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
-        long earliestPutTs, InternalScanner s) throws IOException {
+        long earliestPutTs, InternalScanner s, CompactionRequest request, long readPoint)
+        throws IOException {
       scanners.forEach(KeyValueScanner::close);
       return NO_DATA;
     }
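NO_DATA is defined elsewhere in this test class, outside the hunks shown. A hypothetical stand-in, to show the shape such a scanner needs — an InternalScanner that always reports end-of-data, so the hooked flushes and compactions write nothing (assuming the two next overloads InternalScanner declares in this era):

  // Hypothetical stand-in for the test's NO_DATA scanner (the real
  // definition is outside this diff): always report end-of-data.
  private static final InternalScanner NO_DATA = new InternalScanner() {
    @Override
    public boolean next(List<Cell> results) throws IOException {
      return false; // no rows now, no rows later
    }

    @Override
    public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
      return false;
    }

    @Override
    public void close() throws IOException {
    }
  };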
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -459,8 +460,8 @@ public class TestNamespaceAuditor {
     volatile CountDownLatch postCompact;
 
     @Override
-    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-        Store store, StoreFile resultFile) throws IOException {
+    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+        StoreFile resultFile, CompactionRequest request) throws IOException {
       postCompact.countDown();
     }
 
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.TestFromClientSideWithCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 
 /**
  * RegionObserver that just reimplements the default behavior,
@@ -44,7 +45,8 @@ public class NoOpScanPolicyObserver implements RegionObserver {
    */
   @Override
   public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
+      Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
+      throws IOException {
     ScanInfo oldSI = store.getScanInfo();
     ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
         oldSI.getTtl(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
@@ -59,7 +61,7 @@ public class NoOpScanPolicyObserver implements RegionObserver {
   public InternalScanner preCompactScannerOpen(
       final ObserverContext<RegionCoprocessorEnvironment> c, Store store,
       List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s) throws IOException {
+      InternalScanner s, CompactionRequest request, long readPoint) throws IOException {
     // this demonstrates how to override the scanners default behavior
     ScanInfo oldSI = store.getScanInfo();
     ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
@@ -69,8 +71,8 @@ public class NoOpScanPolicyObserver implements RegionObserver {
   }
 
   @Override
-  public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, final Scan scan, final NavigableSet<byte[]> targetCols, KeyValueScanner s)
+  public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+      Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPoint)
       throws IOException {
     Region r = c.getEnvironment().getRegion();
     return scan.isReversed() ? new ReversedStoreScanner(store,
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -252,9 +253,8 @@ public class TestHRegionServerBulkLoad {
   public static class MyObserver implements RegionObserver {
     static int sleepDuration;
     @Override
-    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-        final Store store, final InternalScanner scanner, final ScanType scanType)
-        throws IOException {
+    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+        InternalScanner scanner, ScanType scanType, CompactionRequest request) throws IOException {
       try {
         Thread.sleep(sleepDuration);
       } catch (InterruptedException ie) {
@@ -902,7 +902,7 @@ public class TestAccessController extends SecureTestUtil {
       @Override
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preCompact(ObserverContext.createAndPrepare(RCP_ENV, null), null, null,
-          ScanType.COMPACT_RETAIN_DELETES);
+          ScanType.COMPACT_RETAIN_DELETES, null);
         return null;
       }
     };
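For tests that drive the hook directly, the new trailing CompactionRequest parameter is simply passed as null; the ACL check shown above does not consult it. The updated call, annotated (parameter comments ours):

  ACCESS_CONTROLLER.preCompact(ObserverContext.createAndPrepare(RCP_ENV, null),
      null /* store */, null /* scanner */, ScanType.COMPACT_RETAIN_DELETES,
      null /* request */);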
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -239,8 +240,8 @@ public class TestCoprocessorScanPolicy {
 
   @Override
   public InternalScanner preFlushScannerOpen(
-      final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<KeyValueScanner> scanners, InternalScanner s) throws IOException {
+      final ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      List<KeyValueScanner> scanners, InternalScanner s, long readPoint) throws IOException {
     Long newTtl = ttls.get(store.getTableName());
     if (newTtl != null) {
       System.out.println("PreFlush:" + newTtl);
@@ -262,7 +263,7 @@ public class TestCoprocessorScanPolicy {
   public InternalScanner preCompactScannerOpen(
       final ObserverContext<RegionCoprocessorEnvironment> c, Store store,
       List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s) throws IOException {
+      InternalScanner s, CompactionRequest request, long readPoint) throws IOException {
     Long newTtl = ttls.get(store.getTableName());
     Integer newVersions = versions.get(store.getTableName());
     ScanInfo oldSI = store.getScanInfo();