HBASE-15173 Execute mergeRegions RPC call as the request user

This commit is contained in:
tedyu 2016-01-28 10:02:49 -08:00
parent dfa9484137
commit 486f7612be
8 changed files with 46 additions and 17 deletions

View File

@@ -23,12 +23,14 @@ import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpeci
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor; import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException; import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method; import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType; import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type; import java.lang.reflect.Type;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
@@ -138,6 +140,7 @@ import org.apache.hadoop.hbase.quotas.QuotaType;
import org.apache.hadoop.hbase.quotas.ThrottleType; import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.TablePermission; import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.security.access.UserPermission; import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -1896,17 +1899,34 @@ public final class ProtobufUtil {
* @param region_b * @param region_b
* @param forcible true if do a compulsory merge, otherwise we will only merge * @param forcible true if do a compulsory merge, otherwise we will only merge
* two adjacent regions * two adjacent regions
* @param user effective user
* @throws IOException * @throws IOException
*/ */
public static void mergeRegions(final AdminService.BlockingInterface admin, public static void mergeRegions(final AdminService.BlockingInterface admin,
final HRegionInfo region_a, final HRegionInfo region_b, final HRegionInfo region_a, final HRegionInfo region_b,
final boolean forcible) throws IOException { final boolean forcible, final User user) throws IOException {
MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest( final MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest(
region_a.getRegionName(), region_b.getRegionName(),forcible); region_a.getRegionName(), region_b.getRegionName(),forcible);
try { if (user != null) {
admin.mergeRegions(null, request); try {
} catch (ServiceException se) { user.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
throw ProtobufUtil.getRemoteException(se); @Override
public Void run() throws Exception {
admin.mergeRegions(null, request);
return null;
}
});
} catch (InterruptedException ie) {
InterruptedIOException iioe = new InterruptedIOException();
iioe.initCause(ie);
throw iioe;
}
} else {
try {
admin.mergeRegions(null, request);
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
} }
} }

View File

@@ -137,6 +137,7 @@ import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy; import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@@ -1406,10 +1407,10 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
@Override @Override
public void dispatchMergingRegions(final HRegionInfo region_a, public void dispatchMergingRegions(final HRegionInfo region_a,
final HRegionInfo region_b, final boolean forcible) throws IOException { final HRegionInfo region_b, final boolean forcible, final User user) throws IOException {
checkInitialized(); checkInitialized();
this.service.submit(new DispatchMergingRegionHandler(this, this.service.submit(new DispatchMergingRegionHandler(this,
this.catalogJanitorChore, region_a, region_b, forcible)); this.catalogJanitorChore, region_a, region_b, forcible, user));
} }
void move(final byte[] encodedRegionName, void move(final byte[] encodedRegionName,

View File

@@ -624,7 +624,7 @@ public class MasterRpcServices extends RSRpcServices
} }
try { try {
master.dispatchMergingRegions(regionInfoA, regionInfoB, forcible); master.dispatchMergingRegions(regionInfoA, regionInfoB, forcible, RpcServer.getRequestUser());
master.cpHost.postDispatchMerge(regionInfoA, regionInfoB); master.cpHost.postDispatchMerge(regionInfoA, regionInfoB);
} catch (IOException ioe) { } catch (IOException ioe) {
throw new ServiceException(ioe); throw new ServiceException(ioe);

View File

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.security.User;
import com.google.protobuf.Service; import com.google.protobuf.Service;
@@ -257,10 +258,11 @@ public interface MasterServices extends Server {
* @param region_b region to merge * @param region_b region to merge
* @param forcible true if do a compulsory merge, otherwise we will only merge * @param forcible true if do a compulsory merge, otherwise we will only merge
* two adjacent regions * two adjacent regions
* @param user effective user
* @throws IOException * @throws IOException
*/ */
void dispatchMergingRegions( void dispatchMergingRegions(
final HRegionInfo region_a, final HRegionInfo region_b, final boolean forcible final HRegionInfo region_a, final HRegionInfo region_b, final boolean forcible, final User user
) throws IOException; ) throws IOException;
/** /**
@@ -365,7 +367,7 @@ public interface MasterServices extends Server {
public List<TableName> listTableNamesByNamespace(String name) throws IOException; public List<TableName> listTableNamesByNamespace(String name) throws IOException;
/** /**
* @param table * @param table the table for which last successful major compaction time is queried
* @return the timestamp of the last successful major compaction for the passed table, * @return the timestamp of the last successful major compaction for the passed table,
* or 0 if no HFile resulting from a major compaction exists * or 0 if no HFile resulting from a major compaction exists
* @throws IOException * @throws IOException

View File

@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSeque
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.RetryCounter;
@@ -889,7 +890,7 @@ public class ServerManager {
* @throws IOException * @throws IOException
*/ */
public void sendRegionsMerge(ServerName server, HRegionInfo region_a, public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
HRegionInfo region_b, boolean forcible) throws IOException { HRegionInfo region_b, boolean forcible, User user) throws IOException {
if (server == null) if (server == null)
throw new NullPointerException("Passed server is null"); throw new NullPointerException("Passed server is null");
if (region_a == null || region_b == null) if (region_a == null || region_b == null)
@@ -902,7 +903,7 @@ public class ServerManager {
+ region_b.getRegionNameAsString() + region_b.getRegionNameAsString()
+ " failed because no RPC connection found to this server"); + " failed because no RPC connection found to this server");
} }
ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible); ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible, user);
} }
/** /**

View File

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/** /**
@@ -55,16 +56,18 @@ public class DispatchMergingRegionHandler extends EventHandler {
private HRegionInfo region_b; private HRegionInfo region_b;
private final boolean forcible; private final boolean forcible;
private final int timeout; private final int timeout;
private final User user;
public DispatchMergingRegionHandler(final MasterServices services, public DispatchMergingRegionHandler(final MasterServices services,
final CatalogJanitor catalogJanitor, final HRegionInfo region_a, final CatalogJanitor catalogJanitor, final HRegionInfo region_a,
final HRegionInfo region_b, final boolean forcible) { final HRegionInfo region_b, final boolean forcible, final User user) {
super(services, EventType.C_M_MERGE_REGION); super(services, EventType.C_M_MERGE_REGION);
this.masterServices = services; this.masterServices = services;
this.catalogJanitor = catalogJanitor; this.catalogJanitor = catalogJanitor;
this.region_a = region_a; this.region_a = region_a;
this.region_b = region_b; this.region_b = region_b;
this.forcible = forcible; this.forcible = forcible;
this.user = user;
this.timeout = server.getConfiguration().getInt( this.timeout = server.getConfiguration().getInt(
"hbase.master.regionmerge.timeout", 120 * 1000); "hbase.master.regionmerge.timeout", 120 * 1000);
} }
@@ -148,7 +151,7 @@ public class DispatchMergingRegionHandler extends EventHandler {
while (!masterServices.isStopped()) { while (!masterServices.isStopped()) {
try { try {
masterServices.getServerManager().sendRegionsMerge(region_a_location, masterServices.getServerManager().sendRegionsMerge(region_a_location,
region_a, region_b, forcible); region_a, region_b, forcible, user);
LOG.info("Sent merge to server " + region_a_location + " for region " + LOG.info("Sent merge to server " + region_a_location + " for region " +
region_a.getEncodedName() + "," + region_b.getEncodedName() + ", focible=" + forcible); region_a.getEncodedName() + "," + region_b.getEncodedName() + ", focible=" + forcible);
break; break;

View File

@@ -1256,7 +1256,8 @@ public class TestAdmin1 {
try { try {
AdminService.BlockingInterface admin = TEST_UTIL.getHBaseAdmin().getConnection() AdminService.BlockingInterface admin = TEST_UTIL.getHBaseAdmin().getConnection()
.getAdmin(regions.get(1).getSecond()); .getAdmin(regions.get(1).getSecond());
ProtobufUtil.mergeRegions(admin, regions.get(1).getFirst(), regions.get(2).getFirst(), true); ProtobufUtil.mergeRegions(admin, regions.get(1).getFirst(), regions.get(2).getFirst(), true,
null);
} catch (MergeRegionException mm) { } catch (MergeRegionException mm) {
gotException = true; gotException = true;
} }

View File

@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResul
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -512,7 +513,7 @@ public class TestCatalogJanitor {
@Override @Override
public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
boolean forcible) throws IOException { boolean forcible, User user) throws IOException {
} }
@Override @Override