HBASE-15198 RPC client not using Codec and CellBlock for puts by default.

Author: anoopsjohn
Date:   2016-02-11 16:37:20 +05:30
Parent: cd2b4dfa12
Commit: 19f8faeb88

9 changed files with 36 additions and 43 deletions
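Note (editorial, not part of the commit): the bug was that the client decided whether to send mutations as cell blocks by reading hbase.client.rpc.codec from the Configuration, which is normally unset because the RPC client falls back to a default codec on its own. A minimal sketch of the configuration knob involved; KeyValueCodec is the stock codec class, used here as an illustrative value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class CodecConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pinning a codec explicitly always enabled cell blocks, even before this fix:
    conf.set(HConstants.RPC_CODEC_CONF_KEY, "org.apache.hadoop.hbase.codec.KeyValueCodec");
    // Setting the key to the empty string disables the codec (and cell blocks);
    // leaving it unset lets the client pick its default codec, which is the
    // case the old config-only check misclassified:
    // conf.set(HConstants.RPC_CODEC_CONF_KEY, "");
  }
}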


@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.List;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MasterNotRunningException;
@@ -306,4 +305,9 @@ public interface ClusterConnection extends HConnection {
    */
   public MetricsConnection getConnectionMetrics();
 
+  /**
+   * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+   *         supports cell blocks.
+   */
+  boolean hasCellBlockSupport();
 }


@@ -469,4 +469,9 @@ abstract class ConnectionAdapter implements ClusterConnection {
   public ClientBackoffPolicy getBackoffPolicy() {
     return wrappedConnection.getBackoffPolicy();
   }
+
+  @Override
+  public boolean hasCellBlockSupport() {
+    return wrappedConnection.hasCellBlockSupport();
+  }
 }


@@ -57,8 +57,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
@@ -73,7 +71,6 @@ import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@@ -184,7 +181,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequ
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
-import org.apache.hadoop.hbase.quotas.ThrottlingException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -2620,6 +2616,11 @@ class ConnectionManager {
     public boolean isManaged() {
       return managed;
     }
+
+    @Override
+    public boolean hasCellBlockSupport() {
+      return this.rpcClient.hasCellBlockSupport();
+    }
   }
 
   /**


@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -152,11 +151,8 @@ class MultiServerCallable<R> extends RegionServerCallable<MultiResponse> impleme
     // This is not exact -- the configuration could have changed on us after connection was set up
     // but it will do for now.
     HConnection connection = getConnection();
-    if (connection == null) return true; // Default is to do cellblocks.
-    Configuration configuration = connection.getConfiguration();
-    if (configuration == null) return true;
-    String codec = configuration.get(HConstants.RPC_CODEC_CONF_KEY, "");
-    return codec != null && codec.length() > 0;
+    if (!(connection instanceof ClusterConnection)) return true; // Default is to do cellblocks.
+    return ((ClusterConnection) connection).hasCellBlockSupport();
   }
 
   @Override
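For illustration (standalone sketch, not from the patch): on a default client Configuration the predicate removed above evaluates to false, even though the RPC client still constructs a default codec and could use cell blocks. That mismatch is why puts in multi calls fell back to protobuf-encoded cells:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class OldCellBlockCheck {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // hbase.client.rpc.codec is normally unset, so this prints false; the
    // fixed code instead asks the connection (and thus the RPC client)
    // whether a codec was actually instantiated.
    String codec = conf.get(HConstants.RPC_CODEC_CONF_KEY, "");
    System.out.println("old isCellBlock(): " + (codec != null && codec.length() > 0));
  }
}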


@@ -149,6 +149,11 @@ public abstract class AbstractRpcClient implements RpcClient {
     }
   }
 
+  @Override
+  public boolean hasCellBlockSupport() {
+    return this.codec != null;
+  }
+
   /**
    * Encapsulate the ugly casting and RuntimeException conversion in private method.
    * @param conf configuration
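A hedged usage sketch of the new API surface (ConnectionFactory and ClusterConnection are real client classes; casting a user-facing Connection to the private ClusterConnection interface happens to work for the stock implementation and is shown only for illustration):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HasCellBlockSupportExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // The stock HConnectionImplementation implements ClusterConnection,
      // so this reports whether the underlying RPC client holds a codec.
      if (conn instanceof ClusterConnection
          && ((ClusterConnection) conn).hasCellBlockSupport()) {
        System.out.println("Puts in multi calls will travel as cell blocks");
      }
    }
  }
}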


@@ -83,4 +83,10 @@ import java.io.IOException;
    * using this client.
    */
   @Override public void close();
+
+  /**
+   * @return true when this client uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+   *         supports cell blocks.
+   */
+  boolean hasCellBlockSupport();
 }


@@ -73,7 +73,6 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
@@ -1206,10 +1205,6 @@ public final class ProtobufUtil {
       valueBuilder.setValue(ByteStringer.wrap(
         cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
       valueBuilder.setTimestamp(cell.getTimestamp());
-      if(cell.getTagsLength() > 0) {
-        valueBuilder.setTags(ByteStringer.wrap(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength()));
-      }
       if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) {
         KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte());
         valueBuilder.setDeleteType(toDeleteType(keyValueType));


@@ -21,8 +21,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.regex.Pattern;
 
-import org.apache.hadoop.hbase.util.ByteStringer;
-
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -680,8 +678,15 @@ public final class RequestConverter {
         cells.add(i);
         builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(
           MutationType.INCREMENT, i, mutationBuilder, action.getNonce())));
+      } else if (row instanceof RegionCoprocessorServiceExec) {
+        RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
+        builder.addAction(actionBuilder.setServiceCall(ClientProtos.CoprocessorServiceCall
+            .newBuilder().setRow(ByteStringer.wrap(exec.getRow()))
+            .setServiceName(exec.getMethod().getService().getFullName())
+            .setMethodName(exec.getMethod().getName())
+            .setRequest(exec.getRequest().toByteString())));
       } else if (row instanceof RowMutations) {
-        continue; // ignore RowMutations
+        throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
       } else {
         throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
       }
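Since RowMutations now fails fast in multi instead of being silently skipped, atomic row mutations should go through Table#mutateRow rather than batch(). A brief sketch; the table and column names are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateRowExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table t = conn.getTable(TableName.valueOf("my_table"))) { // hypothetical table
      byte[] row = Bytes.toBytes("r1");
      RowMutations rm = new RowMutations(row);
      rm.add(new Put(row).addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      rm.add(new Delete(row).addColumns(Bytes.toBytes("f"), Bytes.toBytes("old")));
      t.mutateRow(rm); // atomic on a single row; not supported via batch()/multi
    }
  }
}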


@@ -2505,30 +2505,6 @@ public class TestAccessController extends SecureTestUtil {
     }
   }
 
-  @Test (timeout=180000)
-  public void testReservedCellTags() throws Exception {
-    AccessTestAction putWithReservedTag = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        try(Connection conn = ConnectionFactory.createConnection(conf);
-            Table t = conn.getTable(TEST_TABLE);) {
-          KeyValue kv = new KeyValue(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER,
-            HConstants.LATEST_TIMESTAMP, HConstants.EMPTY_BYTE_ARRAY,
-            new Tag[] { new Tag(AccessControlLists.ACL_TAG_TYPE,
-              ProtobufUtil.toUsersAndPermissions(USER_OWNER.getShortName(),
-                new Permission(Permission.Action.READ)).toByteArray()) });
-          t.put(new Put(TEST_ROW).add(kv));
-        }
-        return null;
-      }
-    };
-
-    // Current user is superuser
-    verifyAllowed(putWithReservedTag, User.getCurrent());
-    // No other user should be allowed
-    verifyDenied(putWithReservedTag, USER_OWNER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO);
-  }
-
   @Test (timeout=180000)
   public void testSetQuota() throws Exception {
     AccessTestAction setUserQuotaAction = new AccessTestAction() {