HBASE-18747 Introduce new example and helper classes to tell CP users how to do filtering on scanners
This commit is contained in:
parent 7a25ed6d7c
commit 104595137e
hbase-examples/pom.xml
@@ -183,6 +183,18 @@
       <groupId>com.google.protobuf</groupId>
       <artifactId>protobuf-java</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-recipes</artifactId>
+    </dependency>
     <dependency>
       <groupId>com.github.stephenc.findbugs</groupId>
       <artifactId>findbugs-annotations</artifactId>
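Note: the three Curator artifacts above are added without a `<version>`; the version (`${curator.version}`) is managed in the root `pom.xml`, whose change appears as the final hunk of this diff.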
DelegatingInternalScanner.java (new file)
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+
+/**
+ * A simple delegation for doing filtering on {@link InternalScanner}.
+ */
+public class DelegatingInternalScanner implements InternalScanner {
+
+  protected final InternalScanner scanner;
+
+  public DelegatingInternalScanner(InternalScanner scanner) {
+    this.scanner = scanner;
+  }
+
+  @Override
+  public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
+    return scanner.next(result, scannerContext);
+  }
+
+  @Override
+  public void close() throws IOException {
+    scanner.close();
+  }
+}
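This small delegate is the heart of the change: a coprocessor user who wants to filter flush or compaction output extends it and overrides only the two-argument `next`. A minimal sketch of a user-side observer (the class name and cutoff value are illustrative, not part of the commit; imports are the same as in `ZooKeeperScanPolicyObserver` below):

```java
// Sketch: filter cells older than a fixed cutoff out of compaction output,
// mirroring the pattern ZooKeeperScanPolicyObserver.wrap() uses below.
public class ExampleTtlObserver implements RegionCoprocessor, RegionObserver {

  private static final long CUTOFF = 1_000_000L; // illustrative cutoff timestamp

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
      CompactionRequest request) throws IOException {
    return new DelegatingInternalScanner(scanner) {
      @Override
      public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
        boolean moreRows = scanner.next(result, scannerContext); // delegate the real scan
        result.removeIf(cell -> cell.getTimestamp() < CUTOFF); // then filter in place
        return moreRows;
      }
    };
  }
}
```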
ZooKeeperScanPolicyObserver.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Copyright The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -20,54 +20,40 @@ package org.apache.hadoop.hbase.coprocessor.example;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.NavigableSet;
 import java.util.Optional;
-import java.util.OptionalInt;
+import java.util.OptionalLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.recipes.cache.ChildData;
+import org.apache.curator.framework.recipes.cache.NodeCache;
+import org.apache.curator.retry.RetryForever;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.IsolationLevel;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
 
 /**
- * This is an example showing how a RegionObserver could configured
- * via ZooKeeper in order to control a Region compaction, flush, and scan policy.
- *
- * This also demonstrated the use of shared
- * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state.
- * See {@link RegionCoprocessorEnvironment#getSharedData()}.
- *
- * This would be useful for an incremental backup tool, which would indicate the last
- * time of a successful backup via ZK and instruct HBase to not delete data that was
- * inserted since (based on wall clock time).
- *
- * This implements org.apache.zookeeper.Watcher directly instead of using
- * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher},
- * because RegionObservers come and go and currently
- * listeners registered with ZooKeeperWatcher cannot be removed.
+ * This is an example showing how a RegionObserver could be configured via ZooKeeper in order to
+ * control a Region compaction, flush, and scan policy. This also demonstrates the use of shared
+ * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state. See
+ * {@link RegionCoprocessorEnvironment#getSharedData()}.
+ * <p>
+ * This would be useful for an incremental backup tool, which would indicate the last time of a
+ * successful backup via ZK and instruct HBase that it can safely delete data which has already
+ * been backed up.
  */
 public class ZooKeeperScanPolicyObserver implements RegionCoprocessor, RegionObserver {
 
   @Override
   public Optional<RegionObserver> getRegionObserver() {
     return Optional.of(this);
@@ -78,178 +64,138 @@ public class ZooKeeperScanPolicyObserver implements RegionCoprocessor, RegionObs
   public static final String ZK_SESSION_TIMEOUT_KEY =
       "ZooKeeperScanPolicyObserver.zookeeper.session.timeout";
   public static final int ZK_SESSION_TIMEOUT_DEFAULT = 30 * 1000; // 30 secs
-  public static final String node = "/backup/example/lastbackup";
-  public static final String zkkey = "ZK";
-  private static final Log LOG = LogFactory.getLog(ZooKeeperScanPolicyObserver.class);
+  public static final String NODE = "/backup/example/lastbackup";
+  private static final String ZKKEY = "ZK";
 
-  private ZooKeeper zk = null;
+  private NodeCache cache;
 
   /**
    * Internal watcher that keeps "data" up to date asynchronously.
    */
-  private static class ZKWatcher implements Watcher {
-    private byte[] data = null;
-    private ZooKeeper zk;
-    private volatile boolean needSetup = true;
-    private volatile long lastSetupTry = 0;
-
-    public ZKWatcher(ZooKeeper zk) {
-      this.zk = zk;
-      // trigger the listening
-      getData();
-    }
-
-    /**
-     * Get the maintained data. In case of any ZK exceptions this will retry
-     * establishing the connection (but not more than twice/minute).
-     *
-     * getData is on the critical path, so make sure it is fast unless there is
-     * a problem (network partion, ZK ensemble down, etc)
-     * Make sure at most one (unlucky) thread retries and other threads don't pile up
-     * while that threads tries to recreate the connection.
-     *
-     * @return the last know version of the data
-     */
-    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION")
-    public byte[] getData() {
-      // try at most twice/minute
-      if (needSetup && EnvironmentEdgeManager.currentTime() > lastSetupTry + 30000) {
-        synchronized (this) {
-          // make sure only one thread tries to reconnect
-          if (needSetup) {
-            needSetup = false;
-          } else {
-            return data;
-          }
-        }
-      }
-      // do this without the lock held to avoid threads piling up on this lock,
-      // as it can take a while
-      try {
-        LOG.debug("Connecting to ZK");
-        // record this attempt
-        lastSetupTry = EnvironmentEdgeManager.currentTime();
-        if (zk.exists(node, false) != null) {
-          data = zk.getData(node, this, null);
-          LOG.debug("Read synchronously: " + (data == null ? "null" : Bytes.toLong(data)));
-        } else {
-          zk.exists(node, this);
-        }
-      } catch (Exception x) {
-        // try again if this fails
-        needSetup = true;
-      }
-      return data;
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      switch (event.getType()) {
-        case NodeDataChanged:
-        case NodeCreated:
-          try {
-            // get data and re-watch
-            data = zk.getData(node, this, null);
-            LOG.debug("Read asynchronously: " + (data == null ? "null" : Bytes.toLong(data)));
-          } catch (InterruptedException ix) {
-          } catch (KeeperException kx) {
-            needSetup = true;
-          }
-          break;
-
-        case NodeDeleted:
-          try {
-            // just re-watch
-            zk.exists(node, this);
-            data = null;
-          } catch (InterruptedException ix) {
-          } catch (KeeperException kx) {
-            needSetup = true;
-          }
-          break;
-
-        default:
-          // ignore
-      }
-    }
-  }
+  private static final class ZKDataHolder {
+
+    private final String ensemble;
+
+    private final int sessionTimeout;
+
+    private CuratorFramework client;
+
+    private NodeCache cache;
+
+    private int ref;
+
+    public ZKDataHolder(String ensemble, int sessionTimeout) {
+      this.ensemble = ensemble;
+      this.sessionTimeout = sessionTimeout;
+    }
+
+    private void create() throws Exception {
+      client =
+          CuratorFrameworkFactory.builder().connectString(ensemble).sessionTimeoutMs(sessionTimeout)
+              .retryPolicy(new RetryForever(1000)).canBeReadOnly(true).build();
+      client.start();
+      cache = new NodeCache(client, NODE);
+      cache.start(true);
+    }
+
+    private void close() {
+      if (cache != null) {
+        try {
+          cache.close();
+        } catch (IOException e) {
+          // should not happen
+          throw new AssertionError(e);
+        }
+        cache = null;
+      }
+      if (client != null) {
+        client.close();
+        client = null;
+      }
+    }
+
+    public synchronized NodeCache acquire() throws Exception {
+      if (ref == 0) {
+        try {
+          create();
+        } catch (Exception e) {
+          close();
+          throw e;
+        }
+      }
+      ref++;
+      return cache;
+    }
+
+    public synchronized void release() {
+      ref--;
+      if (ref == 0) {
+        close();
+      }
+    }
+  }
 
   @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-    RegionCoprocessorEnvironment re = (RegionCoprocessorEnvironment) e;
-    if (!re.getSharedData().containsKey(zkkey)) {
-      // there is a short race here
-      // in the worst case we create a watcher that will be notified once
-      String ensemble = re.getConfiguration().get(ZK_ENSEMBLE_KEY);
-      int sessionTimeout = re.getConfiguration().getInt(ZK_SESSION_TIMEOUT_KEY,
-        ZK_SESSION_TIMEOUT_DEFAULT);
-      this.zk = new ZooKeeper(ensemble, sessionTimeout, null);
-      re.getSharedData().putIfAbsent(zkkey, new ZKWatcher(zk));
-    }
-  }
+  public void start(CoprocessorEnvironment env) throws IOException {
+    RegionCoprocessorEnvironment renv = (RegionCoprocessorEnvironment) env;
+    try {
+      this.cache = ((ZKDataHolder) renv.getSharedData().computeIfAbsent(ZKKEY, k -> {
+        String ensemble = renv.getConfiguration().get(ZK_ENSEMBLE_KEY);
+        int sessionTimeout =
+            renv.getConfiguration().getInt(ZK_SESSION_TIMEOUT_KEY, ZK_SESSION_TIMEOUT_DEFAULT);
+        return new ZKDataHolder(ensemble, sessionTimeout);
+      })).acquire();
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
 
   @Override
   public void stop(CoprocessorEnvironment env) throws IOException {
-    if (this.zk != null) {
-      try {
-        this.zk.close();
-      } catch (InterruptedException e) {
-        LOG.error("Excepion while closing the ZK connection!", e);
-      }
-    }
-  }
+    RegionCoprocessorEnvironment renv = (RegionCoprocessorEnvironment) env;
+    this.cache = null;
+    ((ZKDataHolder) renv.getSharedData().get(ZKKEY)).release();
+  }
 
-  protected ScanInfo getScanInfo(Store store, RegionCoprocessorEnvironment e) {
-    byte[] data = ((ZKWatcher) e.getSharedData().get(zkkey)).getData();
-    if (data == null) {
-      return null;
-    }
-    ScanInfo oldSI = ((HStore) store).getScanInfo();
-    if (oldSI.getTtl() == Long.MAX_VALUE) {
-      return null;
-    }
-    long ttl = Math.max(EnvironmentEdgeManager.currentTime() - Bytes.toLong(data), oldSI.getTtl());
-    return new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(), ttl,
-        oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
-  }
+  private OptionalLong getExpireBefore() {
+    ChildData data = cache.getCurrentData();
+    if (data == null) {
+      return OptionalLong.empty();
+    }
+    byte[] bytes = data.getData();
+    if (bytes == null || bytes.length != Long.BYTES) {
+      return OptionalLong.empty();
+    }
+    return OptionalLong.of(Bytes.toLong(bytes));
+  }
+
+  private InternalScanner wrap(InternalScanner scanner) {
+    OptionalLong optExpireBefore = getExpireBefore();
+    if (!optExpireBefore.isPresent()) {
+      return scanner;
+    }
+    long expireBefore = optExpireBefore.getAsLong();
+    return new DelegatingInternalScanner(scanner) {
+
+      @Override
+      public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
+        boolean moreRows = scanner.next(result, scannerContext);
+        result.removeIf(c -> c.getTimestamp() < expireBefore);
+        return moreRows;
+      }
+    };
+  }
 
   @Override
-  public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
-      throws IOException {
-    ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
-    if (scanInfo == null) {
-      // take default action
-      return null;
-    }
-    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners,
-        ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
-  }
+  public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      InternalScanner scanner) throws IOException {
+    return wrap(scanner);
+  }
 
   @Override
-  public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
-      InternalScanner s, CompactionLifeCycleTracker tracker, CompactionRequest request,
-      long readPoint) throws IOException {
-    ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
-    if (scanInfo == null) {
-      // take default action
-      return null;
-    }
-    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners, scanType,
-        store.getSmallestReadPoint(), earliestPutTs);
-  }
-
-  @Override
-  public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPoint)
-      throws IOException {
-    ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
-    if (scanInfo == null) {
-      // take default action
-      return null;
-    }
-    return new StoreScanner((HStore) store, scanInfo, scan, targetCols,
-        ((HStore) store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
-  }
+  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
+      CompactionRequest request) throws IOException {
+    return wrap(scanner);
+  }
 }
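The observer above only reads the znode; the cutoff timestamp has to be published by some external process (the test below does it with a raw ZooKeeper client). As a hedged sketch, an incremental backup tool could publish its last successful backup time with the same Curator API the example now depends on — the connect string and the `lastSuccessfulBackupTime` variable are placeholders, not part of the commit:

```java
// Sketch: publish the "expire before" timestamp the observer reads from NODE.
// Cells older than this value are filtered out on flush and compaction.
CuratorFramework client = CuratorFrameworkFactory.newClient(
    "zk1:2181,zk2:2181,zk3:2181", new RetryForever(1000)); // placeholder ensemble
try {
  client.start();
  byte[] ts = Bytes.toBytes(lastSuccessfulBackupTime); // a long, as Bytes.toLong expects
  if (client.checkExists().forPath(ZooKeeperScanPolicyObserver.NODE) == null) {
    client.create().creatingParentsIfNeeded().forPath(ZooKeeperScanPolicyObserver.NODE, ts);
  } else {
    client.setData().forPath(ZooKeeperScanPolicyObserver.NODE, ts);
  }
} finally {
  client.close();
}
```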
TestZooKeeperScanPolicyObserver.java (new file)
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor.example;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.ZooKeeper;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ CoprocessorTests.class, MediumTests.class })
+public class TestZooKeeperScanPolicyObserver {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static TableName NAME = TableName.valueOf("TestCP");
+
+  private static byte[] FAMILY = Bytes.toBytes("cf");
+
+  private static byte[] QUALIFIER = Bytes.toBytes("cq");
+
+  private static Table TABLE;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(3);
+    UTIL.getAdmin()
+        .createTable(TableDescriptorBuilder.newBuilder(NAME)
+            .addCoprocessor(ZooKeeperScanPolicyObserver.class.getName())
+            .setValue(ZooKeeperScanPolicyObserver.ZK_ENSEMBLE_KEY,
+              "localhost:" + UTIL.getZkCluster().getClientPort())
+            .setValue(ZooKeeperScanPolicyObserver.ZK_SESSION_TIMEOUT_KEY, "2000")
+            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()).build());
+    TABLE = UTIL.getConnection().getTable(NAME);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (TABLE != null) {
+      TABLE.close();
+    }
+    UTIL.shutdownMiniCluster();
+  }
+
+  private void setExpireBefore(long time)
+      throws KeeperException, InterruptedException, IOException {
+    ZooKeeper zk = UTIL.getZooKeeperWatcher().getRecoverableZooKeeper().getZooKeeper();
+    if (zk.exists(ZooKeeperScanPolicyObserver.NODE, false) == null) {
+      zk.create(ZooKeeperScanPolicyObserver.NODE, Bytes.toBytes(time), ZooDefs.Ids.OPEN_ACL_UNSAFE,
+        CreateMode.PERSISTENT);
+    } else {
+      zk.setData(ZooKeeperScanPolicyObserver.NODE, Bytes.toBytes(time), -1);
+    }
+  }
+
+  private void assertValueEquals(int start, int end) throws IOException {
+    for (int i = start; i < end; i++) {
+      assertEquals(i,
+        Bytes.toInt(TABLE.get(new Get(Bytes.toBytes(i))).getValue(FAMILY, QUALIFIER)));
+    }
+  }
+
+  private void assertNotExists(int start, int end) throws IOException {
+    for (int i = start; i < end; i++) {
+      assertFalse(TABLE.exists(new Get(Bytes.toBytes(i))));
+    }
+  }
+
+  private void put(int start, int end, long ts) throws IOException {
+    for (int i = start; i < end; i++) {
+      TABLE.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUALIFIER, ts, Bytes.toBytes(i)));
+    }
+  }
+
+  @Test
+  public void test() throws IOException, KeeperException, InterruptedException {
+    long now = System.currentTimeMillis();
+    put(0, 100, now - 10000);
+    assertValueEquals(0, 100);
+
+    setExpireBefore(now - 5000);
+    Thread.sleep(5000);
+    UTIL.getAdmin().flush(NAME);
+    assertNotExists(0, 100);
+
+    put(0, 50, now - 1000);
+    UTIL.getAdmin().flush(NAME);
+    put(50, 100, now - 100);
+    UTIL.getAdmin().flush(NAME);
+    assertValueEquals(0, 100);
+
+    setExpireBefore(now - 500);
+    Thread.sleep(5000);
+    UTIL.getAdmin().majorCompact(NAME);
+    UTIL.waitFor(30000, () -> UTIL.getHBaseCluster().getRegions(NAME).iterator().next()
+        .getStore(FAMILY).getStorefilesCount() == 1);
+    assertNotExists(0, 50);
+    assertValueEquals(50, 100);
+  }
+}
InternalScanner.java
@@ -46,11 +46,13 @@ import org.apache.yetus.audience.InterfaceStability;
 public interface InternalScanner extends Closeable {
   /**
    * Grab the next row's worth of values.
-   * @param results return output array
+   * @param result return output array
    * @return true if more rows exist after this one, false if scanner is done
    * @throws IOException e
    */
-  boolean next(List<Cell> results) throws IOException;
+  default boolean next(List<Cell> result) throws IOException {
+    return next(result, NoLimitScannerContext.getInstance());
+  }
 
   /**
    * Grab the next row's worth of values.
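With the one-argument `next` now a `default` method on the interface, implementers only override the `ScannerContext` variant; the removals in the hunks below all follow from this. A minimal sketch of what an implementation needs after this change (the variable name is illustrative):

```java
// Sketch: a do-nothing InternalScanner. Only the two-argument next(...) and
// close() must be provided now; the one-argument next(...) is inherited.
InternalScanner emptyScanner = new InternalScanner() {
  @Override
  public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
    return false; // no rows; scanner is exhausted
  }

  @Override
  public void close() throws IOException {
    // nothing to release
  }
};
```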
KeyValueHeap.java
@@ -140,14 +140,8 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
    * <p>
    * This can ONLY be called when you are using Scanners that implement InternalScanner as well as
    * KeyValueScanner (a {@link StoreScanner}).
-   * @param result
    * @return true if more rows exist after this one, false if scanner is done
    */
-  @Override
-  public boolean next(List<Cell> result) throws IOException {
-    return next(result, NoLimitScannerContext.getInstance());
-  }
-
   @Override
   public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
     if (this.current == null) {
StoreScanner.java
@@ -503,11 +503,6 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     return this.heap.seek(key);
   }
 
-  @Override
-  public boolean next(List<Cell> outResult) throws IOException {
-    return next(outResult, NoLimitScannerContext.getInstance());
-  }
-
   /**
    * Get the next row of values from this Store.
    * @param outResult
TestRegionObserverInterface.java
@@ -427,10 +427,6 @@ public class TestRegionObserverInterface {
         InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
         CompactionRequest request) {
       return new InternalScanner() {
-        @Override
-        public boolean next(List<Cell> results) throws IOException {
-          return next(results, NoLimitScannerContext.getInstance());
-        }
 
         @Override
         public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
TestRegionObserverScannerOpenHook.java
@@ -139,11 +139,6 @@ public class TestRegionObserverScannerOpenHook {
         return false;
       }
 
-      @Override
-      public boolean next(List<Cell> results) throws IOException {
-        return false;
-      }
-
       @Override
       public void close() throws IOException {}
     };
TestCompactor.java
@@ -192,16 +192,11 @@ public class TestCompactor {
       this.kvs = new ArrayList<>(Arrays.asList(kvs));
     }
 
-    @Override
-    public boolean next(List<Cell> results) throws IOException {
-      if (kvs.isEmpty()) return false;
-      results.add(kvs.remove(0));
-      return !kvs.isEmpty();
-    }
-
     @Override
     public boolean next(List<Cell> result, ScannerContext scannerContext) throws IOException {
-      return next(result);
+      if (kvs.isEmpty()) return false;
+      result.add(kvs.remove(0));
+      return !kvs.isEmpty();
     }
 
     @Override
TestStripeCompactionPolicy.java
@@ -808,17 +808,12 @@ public class TestStripeCompactionPolicy {
       this.kvs = new ArrayList<>(Arrays.asList(kvs));
     }
 
-    @Override
-    public boolean next(List<Cell> results) throws IOException {
-      if (kvs.isEmpty()) return false;
-      results.add(kvs.remove(0));
-      return !kvs.isEmpty();
-    }
-
     @Override
     public boolean next(List<Cell> result, ScannerContext scannerContext)
         throws IOException {
-      return next(result);
+      if (kvs.isEmpty()) return false;
+      result.add(kvs.remove(0));
+      return !kvs.isEmpty();
     }
 
     @Override
pom.xml
@@ -2050,6 +2050,21 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-recipes</artifactId>
+      <version>${curator.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.zookeeper</groupId>
+          <artifactId>zookeeper</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
     <dependency>
       <groupId>org.apache.yetus</groupId>
       <artifactId>audience-annotations</artifactId>