HBASE-6292. Compact can skip the security access control (ShiXing)
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1355825 13f79535-47bb-0310-9956-ffa450edef68
commit cbd82f0440 (parent ef55fb21de)
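The hunks below thread IOException through the compaction-request path (RegionObserver, CompactionRequestor, CompactSplitThread, Store, RegionCoprocessorHost) so that AccessController can veto a compaction from preCompactSelection instead of the error being logged and swallowed inside Store. As a rough illustration of the coprocessor-facing side of the change, a custom observer could now reject compaction selection as in the sketch below; the class name and message are hypothetical, and only the preCompactSelection signature comes from this patch.

// Hypothetical observer, shown only to illustrate the new hook signature.
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.AccessDeniedException;

public class DenyCompactionObserver extends BaseRegionObserver {
  @Override
  public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
      final Store store, final List<StoreFile> candidates) throws IOException {
    // With this patch the exception propagates out of CompactSplitThread.requestCompaction
    // rather than being dropped inside Store.requestCompaction.
    throw new AccessDeniedException("compaction selection not permitted");
  }
}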
@@ -93,7 +93,7 @@ public abstract class BaseRegionObserver implements RegionObserver {
 
   @Override
   public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, final List<StoreFile> candidates) { }
+      final Store store, final List<StoreFile> candidates) throws IOException { }
 
   @Override
   public void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
@@ -85,9 +85,10 @@ public interface RegionObserver extends Coprocessor {
    * @param c the environment provided by the region server
    * @param store the store where compaction is being requested
    * @param candidates the store files currently available for compaction
+   * @throws IOException if an error occurred on the coprocessor
    */
   void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
-    final Store store, final List<StoreFile> candidates);
+    final Store store, final List<StoreFile> candidates) throws IOException;
 
   /**
    * Called after the {@link StoreFile}s to compact have been selected from the
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
 import java.util.concurrent.Executors;
 import java.util.concurrent.PriorityBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
@@ -148,19 +149,19 @@ public class CompactSplitThread implements CompactionRequestor {
   }
 
   public synchronized void requestCompaction(final HRegion r,
-      final String why) {
+      final String why) throws IOException {
     for(Store s : r.getStores().values()) {
       requestCompaction(r, s, why, Store.NO_PRIORITY);
     }
   }
 
   public synchronized void requestCompaction(final HRegion r, final Store s,
-      final String why) {
+      final String why) throws IOException {
     requestCompaction(r, s, why, Store.NO_PRIORITY);
   }
 
   public synchronized void requestCompaction(final HRegion r, final String why,
-      int p) {
+      int p) throws IOException {
     for(Store s : r.getStores().values()) {
       requestCompaction(r, s, why, p);
     }
@@ -173,7 +174,7 @@ public class CompactSplitThread implements CompactionRequestor {
    * @param priority override the default priority (NO_PRIORITY == decide)
    */
   public synchronized void requestCompaction(final HRegion r, final Store s,
-      final String why, int priority) {
+      final String why, int priority) throws IOException {
     if (this.server.isStopped()) {
       return;
     }
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.Private
@@ -26,30 +27,34 @@ public interface CompactionRequestor {
   /**
    * @param r Region to compact
    * @param why Why compaction was requested -- used in debug messages
+   * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final String why);
+  public void requestCompaction(final HRegion r, final String why) throws IOException;
 
   /**
    * @param r Region to compact
    * @param s Store within region to compact
    * @param why Why compaction was requested -- used in debug messages
+   * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final Store s, final String why);
+  public void requestCompaction(final HRegion r, final Store s, final String why) throws IOException;
 
   /**
    * @param r Region to compact
    * @param why Why compaction was requested -- used in debug messages
    * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final String why, int pri);
+  public void requestCompaction(final HRegion r, final String why, int pri) throws IOException;
 
   /**
    * @param r Region to compact
    * @param s Store within region to compact
    * @param why Why compaction was requested -- used in debug messages
    * @param pri Priority of this compaction. minHeap. <=0 is critical
+   * @throws IOException
    */
   public void requestCompaction(final HRegion r, final Store s,
-      final String why, int pri);
+      final String why, int pri) throws IOException;
 
 }
@@ -358,7 +358,13 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
       LOG.warn("Region " + region.getRegionNameAsString() + " has too many " +
         "store files; delaying flush up to " + this.blockingWaitTime + "ms");
       if (!this.server.compactSplitThread.requestSplit(region)) {
+        try {
           this.server.compactSplitThread.requestCompaction(region, getName());
+        } catch (IOException e) {
+          LOG.error("Cache flush failed" +
+            (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionName())) : ""),
+            RemoteExceptionHandler.checkIOException(e));
+        }
       }
     }
 
@@ -309,15 +309,21 @@ public class RegionCoprocessorHost
    * @param store The store where compaction is being requested
    * @param candidates The currently available store files
    * @return If {@code true}, skip the normal selection process and use the current list
+   * @throws IOException
    */
-  public boolean preCompactSelection(Store store, List<StoreFile> candidates) {
+  public boolean preCompactSelection(Store store, List<StoreFile> candidates) throws IOException {
     ObserverContext<RegionCoprocessorEnvironment> ctx = null;
     boolean bypass = false;
     for (RegionEnvironment env: coprocessors) {
       if (env.getInstance() instanceof RegionObserver) {
         ctx = ObserverContext.createAndPrepare(env, ctx);
+        try {
           ((RegionObserver)env.getInstance()).preCompactSelection(
               ctx, store, candidates);
+        } catch (Throwable e) {
+          handleCoprocessorThrowable(env,e);
+
+        }
         bypass |= ctx.shouldBypass();
         if (ctx.shouldComplete()) {
           break;
@@ -1241,11 +1241,11 @@ public class Store extends SchemaConfigured implements HeapSize {
     return ret;
   }
 
-  public CompactionRequest requestCompaction() {
+  public CompactionRequest requestCompaction() throws IOException {
     return requestCompaction(NO_PRIORITY);
   }
 
-  public CompactionRequest requestCompaction(int priority) {
+  public CompactionRequest requestCompaction(int priority) throws IOException {
     // don't even select for compaction if writes are disabled
     if (!this.region.areWritesEnabled()) {
       return null;
@@ -1309,9 +1309,6 @@ public class Store extends SchemaConfigured implements HeapSize {
         int pri = getCompactPriority(priority);
         ret = new CompactionRequest(region, this, filesToCompact, isMajor, pri);
       }
-    } catch (IOException ex) {
-      LOG.error("Compaction Request failed for region " + region + ", store "
-          + this, RemoteExceptionHandler.checkIOException(ex));
     } finally {
       this.lock.readLock().unlock();
     }
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -795,6 +796,12 @@ public class AccessController extends BaseRegionObserver
     return scanner;
   }
 
+  @Override
+  public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> e,
+      final Store store, final List<StoreFile> candidates) throws IOException {
+    requirePermission(getTableName(e.getEnvironment()), null, null, Action.ADMIN);
+  }
+
   @Override
   public void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
       final byte [] row, final byte [] family, final Result result)
@@ -474,6 +474,19 @@ public class TestAccessController {
     verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE);
   }
 
+  @Test
+  public void testPreCompactSelection() throws Exception {
+    PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preCompactSelection(ObserverContext.createAndPrepare(RCP_ENV, null), null, null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE);
+  }
+
   private void verifyRead(PrivilegedExceptionAction action) throws Exception {
     verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_RW, USER_RO);
     verifyDenied(action, USER_NONE, USER_CREATE);