From ed9218145fc4a4f1eebf08735280e69a5ceccd53 Mon Sep 17 00:00:00 2001 From: Zhihong Yu Date: Wed, 21 Nov 2012 23:11:54 +0000 Subject: [PATCH] HBASE-6787 Convert RowProcessorProtocol to protocol buffer service (Devaraj) git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1412356 13f79535-47bb-0310-9956-ffa450edef68 --- .../MultiRowMutationProcessorProtos.java | 667 +++++++++ .../generated/RowProcessorProtos.java | 1262 +++++++++++++++++ .../MultiRowMutationProcessorMessages.proto | 32 + .../src/main/protobuf/RowProcessor.proto | 41 + .../client/coprocessor/AggregationClient.java | 7 +- .../coprocessor/RowProcessorClient.java} | 35 +- .../coprocessor/BaseRowProcessorEndpoint.java | 113 +- .../hbase/coprocessor/ColumnInterpreter.java | 4 +- .../hbase/regionserver/BaseRowProcessor.java | 10 +- .../hadoop/hbase/regionserver/HRegion.java | 6 +- .../MultiRowMutationProcessor.java | 19 +- .../hbase/regionserver/RowProcessor.java | 35 +- .../coprocessor/TestRowProcessorEndpoint.java | 196 ++- .../protobuf/IncrementCounterProcessor.proto | 55 + 14 files changed, 2362 insertions(+), 120 deletions(-) create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProcessorProtos.java create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java create mode 100644 hbase-protocol/src/main/protobuf/MultiRowMutationProcessorMessages.proto create mode 100644 hbase-protocol/src/main/protobuf/RowProcessor.proto rename hbase-server/src/main/java/org/apache/hadoop/hbase/{coprocessor/RowProcessorProtocol.java => client/coprocessor/RowProcessorClient.java} (54%) create mode 100644 hbase-server/src/test/protobuf/IncrementCounterProcessor.proto diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProcessorProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProcessorProtos.java new file mode 100644 index 00000000000..18582d91fd2 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProcessorProtos.java @@ -0,0 +1,667 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: MultiRowMutationProcessorMessages.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class MultiRowMutationProcessorProtos { + private MultiRowMutationProcessorProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface MultiRowMutationProcessorRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class MultiRowMutationProcessorRequest extends + com.google.protobuf.GeneratedMessage + implements MultiRowMutationProcessorRequestOrBuilder { + // Use MultiRowMutationProcessorRequest.newBuilder() to construct. 
+ private MultiRowMutationProcessorRequest(Builder builder) { + super(builder); + } + private MultiRowMutationProcessorRequest(boolean noInit) {} + + private static final MultiRowMutationProcessorRequest defaultInstance; + public static MultiRowMutationProcessorRequest getDefaultInstance() { + return defaultInstance; + } + + public MultiRowMutationProcessorRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest other = (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest result = new org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:MultiRowMutationProcessorRequest) + } + + static { + defaultInstance = new MultiRowMutationProcessorRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MultiRowMutationProcessorRequest) + } + + public interface MultiRowMutationProcessorResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class MultiRowMutationProcessorResponse extends + com.google.protobuf.GeneratedMessage + implements MultiRowMutationProcessorResponseOrBuilder { + // Use MultiRowMutationProcessorResponse.newBuilder() to construct. + private MultiRowMutationProcessorResponse(Builder builder) { + super(builder); + } + private MultiRowMutationProcessorResponse(boolean noInit) {} + + private static final MultiRowMutationProcessorResponse defaultInstance; + public static MultiRowMutationProcessorResponse getDefaultInstance() { + return defaultInstance; + } + + public MultiRowMutationProcessorResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse other = (org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse) obj; + + boolean result = true; + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + 
.buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.internal_static_MultiRowMutationProcessorResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse result = new org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:MultiRowMutationProcessorResponse) + } + + static { + defaultInstance = new MultiRowMutationProcessorResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MultiRowMutationProcessorResponse) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MultiRowMutationProcessorRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MultiRowMutationProcessorRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MultiRowMutationProcessorResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MultiRowMutationProcessorResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\'MultiRowMutationProcessorMessages.prot" + + "o\"\"\n MultiRowMutationProcessorRequest\"#\n" + + "!MultiRowMutationProcessorResponseBR\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\037MultiRowMutationProcessorProtosH\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_MultiRowMutationProcessorRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + 
internal_static_MultiRowMutationProcessorRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MultiRowMutationProcessorRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest.Builder.class); + internal_static_MultiRowMutationProcessorResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_MultiRowMutationProcessorResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MultiRowMutationProcessorResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java new file mode 100644 index 00000000000..162639e1f22 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java @@ -0,0 +1,1262 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: RowProcessor.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class RowProcessorProtos { + private RowProcessorProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RowProcessorRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string rowProcessorClassName = 1; + boolean hasRowProcessorClassName(); + String getRowProcessorClassName(); + + // optional string rowProcessorInitializerMessageName = 2; + boolean hasRowProcessorInitializerMessageName(); + String getRowProcessorInitializerMessageName(); + + // optional bytes rowProcessorInitializerMessage = 3; + boolean hasRowProcessorInitializerMessage(); + com.google.protobuf.ByteString getRowProcessorInitializerMessage(); + } + public static final class RowProcessorRequest extends + com.google.protobuf.GeneratedMessage + implements RowProcessorRequestOrBuilder { + // Use RowProcessorRequest.newBuilder() to construct. 
+ private RowProcessorRequest(Builder builder) { + super(builder); + } + private RowProcessorRequest(boolean noInit) {} + + private static final RowProcessorRequest defaultInstance; + public static RowProcessorRequest getDefaultInstance() { + return defaultInstance; + } + + public RowProcessorRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorRequest_fieldAccessorTable; + } + + private int bitField0_; + // required string rowProcessorClassName = 1; + public static final int ROWPROCESSORCLASSNAME_FIELD_NUMBER = 1; + private java.lang.Object rowProcessorClassName_; + public boolean hasRowProcessorClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getRowProcessorClassName() { + java.lang.Object ref = rowProcessorClassName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + rowProcessorClassName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getRowProcessorClassNameBytes() { + java.lang.Object ref = rowProcessorClassName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + rowProcessorClassName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string rowProcessorInitializerMessageName = 2; + public static final int ROWPROCESSORINITIALIZERMESSAGENAME_FIELD_NUMBER = 2; + private java.lang.Object rowProcessorInitializerMessageName_; + public boolean hasRowProcessorInitializerMessageName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getRowProcessorInitializerMessageName() { + java.lang.Object ref = rowProcessorInitializerMessageName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + rowProcessorInitializerMessageName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getRowProcessorInitializerMessageNameBytes() { + java.lang.Object ref = rowProcessorInitializerMessageName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + rowProcessorInitializerMessageName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bytes rowProcessorInitializerMessage = 3; + public static final int ROWPROCESSORINITIALIZERMESSAGE_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString rowProcessorInitializerMessage_; + public boolean hasRowProcessorInitializerMessage() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getRowProcessorInitializerMessage() { + return rowProcessorInitializerMessage_; + } + + private void initFields() { + rowProcessorClassName_ = ""; + rowProcessorInitializerMessageName_ = ""; + 
rowProcessorInitializerMessage_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRowProcessorClassName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRowProcessorClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getRowProcessorInitializerMessageNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, rowProcessorInitializerMessage_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRowProcessorClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getRowProcessorInitializerMessageNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, rowProcessorInitializerMessage_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest other = (org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest) obj; + + boolean result = true; + result = result && (hasRowProcessorClassName() == other.hasRowProcessorClassName()); + if (hasRowProcessorClassName()) { + result = result && getRowProcessorClassName() + .equals(other.getRowProcessorClassName()); + } + result = result && (hasRowProcessorInitializerMessageName() == other.hasRowProcessorInitializerMessageName()); + if (hasRowProcessorInitializerMessageName()) { + result = result && getRowProcessorInitializerMessageName() + .equals(other.getRowProcessorInitializerMessageName()); + } + result = result && (hasRowProcessorInitializerMessage() == other.hasRowProcessorInitializerMessage()); + if (hasRowProcessorInitializerMessage()) { + result = result && getRowProcessorInitializerMessage() + .equals(other.getRowProcessorInitializerMessage()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRowProcessorClassName()) { + hash = (37 * hash) + ROWPROCESSORCLASSNAME_FIELD_NUMBER; + hash = (53 * hash) + getRowProcessorClassName().hashCode(); + } + if (hasRowProcessorInitializerMessageName()) { + hash = (37 * hash) + 
ROWPROCESSORINITIALIZERMESSAGENAME_FIELD_NUMBER; + hash = (53 * hash) + getRowProcessorInitializerMessageName().hashCode(); + } + if (hasRowProcessorInitializerMessage()) { + hash = (37 * hash) + ROWPROCESSORINITIALIZERMESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getRowProcessorInitializerMessage().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static 
Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rowProcessorClassName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + rowProcessorInitializerMessageName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + rowProcessorInitializerMessage_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest result = new org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ 
|= 0x00000001; + } + result.rowProcessorClassName_ = rowProcessorClassName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.rowProcessorInitializerMessageName_ = rowProcessorInitializerMessageName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.rowProcessorInitializerMessage_ = rowProcessorInitializerMessage_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.getDefaultInstance()) return this; + if (other.hasRowProcessorClassName()) { + setRowProcessorClassName(other.getRowProcessorClassName()); + } + if (other.hasRowProcessorInitializerMessageName()) { + setRowProcessorInitializerMessageName(other.getRowProcessorInitializerMessageName()); + } + if (other.hasRowProcessorInitializerMessage()) { + setRowProcessorInitializerMessage(other.getRowProcessorInitializerMessage()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRowProcessorClassName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rowProcessorClassName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + rowProcessorInitializerMessageName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + rowProcessorInitializerMessage_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string rowProcessorClassName = 1; + private java.lang.Object rowProcessorClassName_ = ""; + public boolean hasRowProcessorClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getRowProcessorClassName() { + java.lang.Object ref = rowProcessorClassName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + rowProcessorClassName_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setRowProcessorClassName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rowProcessorClassName_ = value; + onChanged(); + return this; + } + public Builder clearRowProcessorClassName() { + bitField0_ = (bitField0_ & ~0x00000001); + rowProcessorClassName_ = getDefaultInstance().getRowProcessorClassName(); + 
onChanged(); + return this; + } + void setRowProcessorClassName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + rowProcessorClassName_ = value; + onChanged(); + } + + // optional string rowProcessorInitializerMessageName = 2; + private java.lang.Object rowProcessorInitializerMessageName_ = ""; + public boolean hasRowProcessorInitializerMessageName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getRowProcessorInitializerMessageName() { + java.lang.Object ref = rowProcessorInitializerMessageName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + rowProcessorInitializerMessageName_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setRowProcessorInitializerMessageName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + rowProcessorInitializerMessageName_ = value; + onChanged(); + return this; + } + public Builder clearRowProcessorInitializerMessageName() { + bitField0_ = (bitField0_ & ~0x00000002); + rowProcessorInitializerMessageName_ = getDefaultInstance().getRowProcessorInitializerMessageName(); + onChanged(); + return this; + } + void setRowProcessorInitializerMessageName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + rowProcessorInitializerMessageName_ = value; + onChanged(); + } + + // optional bytes rowProcessorInitializerMessage = 3; + private com.google.protobuf.ByteString rowProcessorInitializerMessage_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasRowProcessorInitializerMessage() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getRowProcessorInitializerMessage() { + return rowProcessorInitializerMessage_; + } + public Builder setRowProcessorInitializerMessage(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rowProcessorInitializerMessage_ = value; + onChanged(); + return this; + } + public Builder clearRowProcessorInitializerMessage() { + bitField0_ = (bitField0_ & ~0x00000004); + rowProcessorInitializerMessage_ = getDefaultInstance().getRowProcessorInitializerMessage(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RowProcessorRequest) + } + + static { + defaultInstance = new RowProcessorRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RowProcessorRequest) + } + + public interface RowProcessorResultOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes rowProcessorResult = 1; + boolean hasRowProcessorResult(); + com.google.protobuf.ByteString getRowProcessorResult(); + } + public static final class RowProcessorResult extends + com.google.protobuf.GeneratedMessage + implements RowProcessorResultOrBuilder { + // Use RowProcessorResult.newBuilder() to construct. 
+ private RowProcessorResult(Builder builder) { + super(builder); + } + private RowProcessorResult(boolean noInit) {} + + private static final RowProcessorResult defaultInstance; + public static RowProcessorResult getDefaultInstance() { + return defaultInstance; + } + + public RowProcessorResult getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorResult_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorResult_fieldAccessorTable; + } + + private int bitField0_; + // required bytes rowProcessorResult = 1; + public static final int ROWPROCESSORRESULT_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString rowProcessorResult_; + public boolean hasRowProcessorResult() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getRowProcessorResult() { + return rowProcessorResult_; + } + + private void initFields() { + rowProcessorResult_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRowProcessorResult()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, rowProcessorResult_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, rowProcessorResult_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult other = (org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult) obj; + + boolean result = true; + result = result && (hasRowProcessorResult() == other.hasRowProcessorResult()); + if (hasRowProcessorResult()) { + result = result && getRowProcessorResult() + .equals(other.getRowProcessorResult()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRowProcessorResult()) { + hash = (37 * hash) + ROWPROCESSORRESULT_FIELD_NUMBER; + hash = (53 * hash) + getRowProcessorResult().hashCode(); + } + hash = (29 * hash) + 
getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + 
Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResultOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorResult_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.internal_static_RowProcessorResult_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rowProcessorResult_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult build() { + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult result = new org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rowProcessorResult_ = rowProcessorResult_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance()) return this; + if (other.hasRowProcessorResult()) { + setRowProcessorResult(other.getRowProcessorResult()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRowProcessorResult()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rowProcessorResult_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required bytes rowProcessorResult = 1; + private com.google.protobuf.ByteString rowProcessorResult_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasRowProcessorResult() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getRowProcessorResult() { + return rowProcessorResult_; + } + public Builder setRowProcessorResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rowProcessorResult_ = value; + onChanged(); + return this; + } + public Builder clearRowProcessorResult() { + bitField0_ = (bitField0_ & ~0x00000001); + rowProcessorResult_ = getDefaultInstance().getRowProcessorResult(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RowProcessorResult) + } + + static { + defaultInstance = new RowProcessorResult(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RowProcessorResult) + } + + public static abstract class RowProcessorService + implements com.google.protobuf.Service { + protected RowProcessorService() {} + + public interface Interface { + public abstract void process( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new RowProcessorService() { + @java.lang.Override + public void process( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest request, + com.google.protobuf.RpcCallback done) { + impl.process(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + 
com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.process(controller, (org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void process( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.process(controller, (org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final 
com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void process( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.class, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult process( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult process( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.getDefaultInstance()); + } + + } + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RowProcessorRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RowProcessorRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RowProcessorResult_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RowProcessorResult_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + 
getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\022RowProcessor.proto\"\210\001\n\023RowProcessorReq" + + "uest\022\035\n\025rowProcessorClassName\030\001 \002(\t\022*\n\"r" + + "owProcessorInitializerMessageName\030\002 \001(\t\022" + + "&\n\036rowProcessorInitializerMessage\030\003 \001(\014\"" + + "0\n\022RowProcessorResult\022\032\n\022rowProcessorRes" + + "ult\030\001 \002(\0142K\n\023RowProcessorService\0224\n\007proc" + + "ess\022\024.RowProcessorRequest\032\023.RowProcessor" + + "ResultBH\n*org.apache.hadoop.hbase.protob" + + "uf.generatedB\022RowProcessorProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_RowProcessorRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_RowProcessorRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RowProcessorRequest_descriptor, + new java.lang.String[] { "RowProcessorClassName", "RowProcessorInitializerMessageName", "RowProcessorInitializerMessage", }, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.class, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest.Builder.class); + internal_static_RowProcessorResult_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_RowProcessorResult_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RowProcessorResult_descriptor, + new java.lang.String[] { "RowProcessorResult", }, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.class, + org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult.Builder.class); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/MultiRowMutationProcessorMessages.proto b/hbase-protocol/src/main/protobuf/MultiRowMutationProcessorMessages.proto new file mode 100644 index 00000000000..cca5f55f6d3 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/MultiRowMutationProcessorMessages.proto @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * Defines a protocol to perform multi row transactions. + * See BaseRowProcessorEndpoint for the implementation. + * See HRegion#processRowsWithLocks() for details. + */ +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "MultiRowMutationProcessorProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +message MultiRowMutationProcessorRequest{ +} + +message MultiRowMutationProcessorResponse{ +} \ No newline at end of file diff --git a/hbase-protocol/src/main/protobuf/RowProcessor.proto b/hbase-protocol/src/main/protobuf/RowProcessor.proto new file mode 100644 index 00000000000..ff2588ae5fe --- /dev/null +++ b/hbase-protocol/src/main/protobuf/RowProcessor.proto @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Defines a protocol to perform multi row transactions. + * See BaseRowProcessorEndpoint for the implementation. + * See HRegion#processRowsWithLocks() for details. + */ +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "RowProcessorProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +message RowProcessorRequest { + required string rowProcessorClassName = 1; + optional string rowProcessorInitializerMessageName = 2; + optional bytes rowProcessorInitializerMessage = 3; +} + +message RowProcessorResult { + required bytes rowProcessorResult = 1; +} + +service RowProcessorService { + rpc process (RowProcessorRequest) returns (RowProcessorResult); +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 02106d3658f..2e0c05e1068 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -678,9 +678,10 @@ public class AggregationClient { final AggregateArgument.Builder requestBuilder = AggregateArgument.newBuilder(); requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); - if (ci.columnInterpreterSpecificData() != null) { - requestBuilder.setInterpreterSpecificBytes( - ci.columnInterpreterSpecificData()); + ByteString columnInterpreterSpecificData = null; + if ((columnInterpreterSpecificData = ci.columnInterpreterSpecificData()) + != null) { + requestBuilder.setInterpreterSpecificBytes(columnInterpreterSpecificData); } requestBuilder.setScan(ProtobufUtil.toScan(scan)); return requestBuilder.build(); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RowProcessorProtocol.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java similarity index 54% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RowProcessorProtocol.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java index c670c39fdd8..747d511d61a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RowProcessorProtocol.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/coprocessor/RowProcessorClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,27 +15,36 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.coprocessor; + +package org.apache.hadoop.hbase.client.coprocessor; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest; import org.apache.hadoop.hbase.regionserver.RowProcessor; +import com.google.protobuf.Message; /** - * Defines a protocol to perform multi row transactions. - * See {@link BaseRowProcessorEndpoint} for the implementation. - * See {@link HRegion#processRowsWithLocks()} for detials. + * Convenience class that is used to make RowProcessorEndpoint invocations. 
+ * For example usage, refer TestRowProcessorEndpoint + * */ @InterfaceAudience.Public @InterfaceStability.Evolving -public interface RowProcessorProtocol extends CoprocessorProtocol { - - /** - * @param processor The processor defines how to process the row - */ - T process(RowProcessor processor) throws IOException; +public class RowProcessorClient { + public static + RowProcessorRequest getRowProcessorPB(RowProcessor r) + throws IOException { + final RowProcessorRequest.Builder requestBuilder = + RowProcessorRequest.newBuilder(); + requestBuilder.setRowProcessorClassName(r.getClass().getName()); + S s = r.getRequestData(); + if (s != null) { + requestBuilder.setRowProcessorInitializerMessageName(s.getClass().getName()); + requestBuilder.setRowProcessorInitializerMessage(s.toByteString()); + } + return requestBuilder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java index 9ee63ffa94d..368f1f7b42b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java @@ -18,21 +18,35 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RowProcessor; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + /** * This class demonstrates how to implement atomic read-modify-writes * using {@link HRegion#processRowsWithLocks()} and Coprocessor endpoints. */ @InterfaceAudience.Public @InterfaceStability.Evolving -public abstract class BaseRowProcessorEndpoint extends BaseEndpointCoprocessor - implements RowProcessorProtocol { - +public abstract class BaseRowProcessorEndpoint +extends RowProcessorService implements CoprocessorService, Coprocessor { + private RegionCoprocessorEnvironment env; /** * Pass a processor to HRegion to process multiple rows atomically. * @@ -42,16 +56,93 @@ public abstract class BaseRowProcessorEndpoint extends BaseEndpointCoprocessor * * See {@link TestRowProcessorEndpoint} for example. * - * @param processor The object defines the read-modify-write procedure - * @return The processing result + * The request contains information for constructing processor + * (see {@link #constructRowProcessorFromRequest}. The processor object defines + * the read-modify-write procedure. 
*/ @Override - public T process(RowProcessor processor) - throws IOException { - HRegion region = - ((RegionCoprocessorEnvironment) getEnvironment()).getRegion(); - region.processRowsWithLocks(processor); - return processor.getResult(); + public void process(RpcController controller, RowProcessorRequest request, + RpcCallback done) { + RowProcessorResult resultProto = null; + try { + RowProcessor processor = constructRowProcessorFromRequest(request); + HRegion region = env.getRegion(); + region.processRowsWithLocks(processor); + T result = processor.getResult(); + RowProcessorResult.Builder b = RowProcessorResult.newBuilder(); + b.setRowProcessorResult(result.toByteString()); + resultProto = b.build(); + } catch (Exception e) { + ResponseConverter.setControllerException(controller, new IOException(e)); + } + done.run(resultProto); } + @Override + public Service getService() { + return this; + } + + /** + * Stores a reference to the coprocessor environment provided by the + * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded + * on a table region, so always expects this to be an instance of + * {@link RegionCoprocessorEnvironment}. + * @param env the environment provided by the coprocessor host + * @throws IOException if the provided environment is not an instance of + * {@code RegionCoprocessorEnvironment} + */ + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (env instanceof RegionCoprocessorEnvironment) { + this.env = (RegionCoprocessorEnvironment)env; + } else { + throw new CoprocessorException("Must be loaded on a table region!"); + } + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + // nothing to do + } + + @SuppressWarnings("unchecked") + RowProcessor constructRowProcessorFromRequest(RowProcessorRequest request) + throws IOException { + String className = request.getRowProcessorClassName(); + Class cls; + try { + cls = Class.forName(className); + RowProcessor ci = (RowProcessor) cls.newInstance(); + if (request.hasRowProcessorInitializerMessageName()) { + Class imn = Class.forName(request.getRowProcessorInitializerMessageName()) + .asSubclass(Message.class); + Method m; + try { + m = imn.getMethod("parseFrom", ByteString.class); + } catch (SecurityException e) { + throw new IOException(e); + } catch (NoSuchMethodException e) { + throw new IOException(e); + } + S s; + try { + s = (S)m.invoke(null,request.getRowProcessorInitializerMessage()); + } catch (IllegalArgumentException e) { + throw new IOException(e); + } catch (InvocationTargetException e) { + throw new IOException(e); + } + ci.initialize(s); + } + return ci; + } catch (ClassNotFoundException e) { + throw new IOException(e); + } catch (InstantiationException e) { + throw new IOException(e); + } catch (IllegalAccessException e) { + throw new IOException(e); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java index 8db8093d81f..d74929c1972 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -119,7 +119,7 @@ public interface ColumnInterpreter { /** * This method should return any additional data that is needed on the * server side to 
construct the ColumnInterpreter. The server - * will pass this to the {@link #initialize(org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.ColumnInterpreter)} + * will pass this to the {@link #initialize(ByteString)} * method. If there is no ColumnInterpreter specific data (for e.g., * {@link LongColumnInterpreter}) then null should be returned. * @return the PB message @@ -161,4 +161,4 @@ public interface ColumnInterpreter { * @return cast */ T castToCellType(S response); -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java index f5243744664..174af98a9d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java @@ -23,15 +23,13 @@ import java.util.UUID; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import com.google.protobuf.Message; + /** * Base class for RowProcessor with some default implementations. */ -public abstract class BaseRowProcessor implements RowProcessor { - - @Override - public T getResult() { - return null; - } +public abstract class BaseRowProcessor +implements RowProcessor { @Override public void preProcess(HRegion region, WALEdit walEdit) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 412abf6a08d..c70e9ab6410 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4302,7 +4302,7 @@ public class HRegion implements HeapSize { // , Writable{ * * @param processor The object defines the reads and writes to a row. 
*/ - public void processRowsWithLocks(RowProcessor processor) + public void processRowsWithLocks(RowProcessor processor) throws IOException { processRowsWithLocks(processor, rowProcessorTimeout); } @@ -4314,7 +4314,7 @@ public class HRegion implements HeapSize { // , Writable{ * @param timeout The timeout of the processor.process() execution * Use a negative number to switch off the time bound */ - public void processRowsWithLocks(RowProcessor processor, long timeout) + public void processRowsWithLocks(RowProcessor processor, long timeout) throws IOException { for (byte[] row : processor.getRowsToLock()) { @@ -4453,7 +4453,7 @@ public class HRegion implements HeapSize { // , Writable{ } } - private void doProcessRowWithTimeout(final RowProcessor processor, + private void doProcessRowWithTimeout(final RowProcessor processor, final long now, final HRegion region, final List mutations, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java index c962bef0977..4bea0116983 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java @@ -27,13 +27,16 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProcessorProtos.MultiRowMutationProcessorResponse; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; /** * A MultiRowProcessor that performs multiple puts and deletes. 
*/ -class MultiRowMutationProcessor extends BaseRowProcessor { +class MultiRowMutationProcessor extends BaseRowProcessor { Collection rowsToLock; Collection mutations; @@ -52,6 +55,11 @@ class MultiRowMutationProcessor extends BaseRowProcessor { public boolean readOnly() { return false; } + + @Override + public MultiRowMutationProcessorResponse getResult() { + return MultiRowMutationProcessorResponse.getDefaultInstance(); + } @Override public void process(long now, @@ -123,4 +131,13 @@ class MultiRowMutationProcessor extends BaseRowProcessor { } } + @Override + public MultiRowMutationProcessorRequest getRequestData() { + return MultiRowMutationProcessorRequest.getDefaultInstance(); + } + + @Override + public void initialize(MultiRowMutationProcessorRequest msg) { + //nothing + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java index 4be0cd310bc..0c504964340 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowProcessor.java @@ -27,6 +27,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; + @InterfaceAudience.Public @InterfaceStability.Evolving @@ -38,10 +41,12 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; * This class performs scans and generates mutations and WAL edits. * The locks and MVCC will be handled by HRegion. * - * The generic type parameter T is the return type of - * RowProcessor.getResult(). + * The RowProcessor user code could have data that needs to be + * sent across for proper initialization at the server side. The generic type + * parameter S is the type of the request data sent to the server. + * The generic type parameter T is the return type of RowProcessor.getResult(). */ -public interface RowProcessor { +public interface RowProcessor { /** * Rows to lock while operation. @@ -51,7 +56,9 @@ public interface RowProcessor { Collection getRowsToLock(); /** - * Obtain the processing result + * Obtain the processing result. All row processor implementations must + * implement this, even if the method is simply returning an empty + * Message. */ T getResult(); @@ -108,4 +115,22 @@ public interface RowProcessor { * @return The name of the processor */ String getName(); -} + + /** + * This method should return any additional data that is needed on the + * server side to construct the RowProcessor. The server will pass this to + * the {@link #initialize(ByteString)} method. If there is no RowProcessor + * specific data then null should be returned. + * @return the PB message + * @throws IOException + */ + S getRequestData() throws IOException; + + /** + * This method should initialize any field(s) of the RowProcessor with + * a parsing of the passed message bytes (used on the server side). 
+ * @param msg + * @throws IOException + */ + void initialize(S msg) throws IOException; +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java index 426a5867e6e..41dd6b75f7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java @@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.coprocessor; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -38,27 +36,39 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.coprocessor.RowProcessorClient; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.FriendsOfFriendsProcessorRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.FriendsOfFriendsProcessorResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.IncCounterProcessorResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.IncCounterProcessorRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.RowSwapProcessorRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.RowSwapProcessorResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.TimeoutProcessorRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.TimeoutProcessorResponse; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult; +import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService; import org.apache.hadoop.hbase.regionserver.BaseRowProcessor; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; import 
com.sun.org.apache.commons.logging.Log; import com.sun.org.apache.commons.logging.LogFactory; @@ -100,7 +110,7 @@ public class TestRowProcessorEndpoint { @BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); - conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, RowProcessorEndpoint.class.getName()); conf.setInt("hbase.client.retries.number", 1); conf.setLong("hbase.hregion.row.processor.timeout", 1000L); @@ -138,12 +148,18 @@ public class TestRowProcessorEndpoint { @Test public void testDoubleScan() throws Throwable { prepareTestData(); - RowProcessorProtocol protocol = - table.coprocessorProxy(RowProcessorProtocol.class, ROW); + + CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.FriendsOfFriendsProcessor processor = new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A); - Set result = protocol.process(processor); - + RowProcessorService.BlockingInterface service = + RowProcessorService.newBlockingStub(channel); + RowProcessorRequest request = RowProcessorClient.getRowProcessorPB(processor); + RowProcessorResult protoResult = service.process(null, request); + FriendsOfFriendsProcessorResponse response = + FriendsOfFriendsProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); + Set result = new HashSet(); + result.addAll(response.getResultList()); Set expected = new HashSet(Arrays.asList(new String[]{"d", "e", "f", "g"})); Get get = new Get(ROW); @@ -176,12 +192,17 @@ public class TestRowProcessorEndpoint { } private int incrementCounter(HTable table) throws Throwable { - RowProcessorProtocol protocol = - table.coprocessorProxy(RowProcessorProtocol.class, ROW); + CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.IncrementCounterProcessor processor = new RowProcessorEndpoint.IncrementCounterProcessor(ROW); - int counterValue = protocol.process(processor); - return counterValue; + RowProcessorService.BlockingInterface service = + RowProcessorService.newBlockingStub(channel); + RowProcessorRequest request = RowProcessorClient.getRowProcessorPB(processor); + RowProcessorResult protoResult = service.process(null, request); + IncCounterProcessorResponse response = IncCounterProcessorResponse + .parseFrom(protoResult.getRowProcessorResult()); + Integer result = response.getResponse(); + return result; } private void concurrentExec( @@ -234,23 +255,27 @@ public class TestRowProcessorEndpoint { } private void swapRows(HTable table) throws Throwable { - RowProcessorProtocol protocol = - table.coprocessorProxy(RowProcessorProtocol.class, ROW); + CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.RowSwapProcessor processor = new RowProcessorEndpoint.RowSwapProcessor(ROW, ROW2); - protocol.process(processor); + RowProcessorService.BlockingInterface service = + RowProcessorService.newBlockingStub(channel); + RowProcessorRequest request = RowProcessorClient.getRowProcessorPB(processor); + service.process(null, request); } @Test public void testTimeout() throws Throwable { prepareTestData(); - RowProcessorProtocol protocol = - table.coprocessorProxy(RowProcessorProtocol.class, ROW); + CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.TimeoutProcessor processor = new RowProcessorEndpoint.TimeoutProcessor(ROW); + RowProcessorService.BlockingInterface service = + RowProcessorService.newBlockingStub(channel); + RowProcessorRequest 
request = RowProcessorClient.getRowProcessorPB(processor); boolean exceptionCaught = false; try { - protocol.process(processor); + service.process(null, request); } catch (Exception e) { exceptionCaught = true; } @@ -264,11 +289,11 @@ public class TestRowProcessorEndpoint { * We define the RowProcessors as the inner class of the endpoint. * So they can be loaded with the endpoint on the coprocessor. */ - public static class RowProcessorEndpoint extends BaseRowProcessorEndpoint - implements RowProcessorProtocol { - + public static class RowProcessorEndpoint + extends BaseRowProcessorEndpoint implements CoprocessorService { public static class IncrementCounterProcessor extends - BaseRowProcessor implements Writable { + BaseRowProcessor { int counter = 0; byte[] row = new byte[0]; @@ -288,8 +313,10 @@ public class TestRowProcessorEndpoint { } @Override - public Integer getResult() { - return counter; + public IncCounterProcessorResponse getResult() { + IncCounterProcessorResponse.Builder i = IncCounterProcessorResponse.newBuilder(); + i.setResponse(counter); + return i.build(); } @Override @@ -330,21 +357,22 @@ public class TestRowProcessorEndpoint { } @Override - public void readFields(DataInput in) throws IOException { - this.row = Bytes.readByteArray(in); - this.counter = in.readInt(); + public IncCounterProcessorRequest getRequestData() throws IOException { + IncCounterProcessorRequest.Builder builder = IncCounterProcessorRequest.newBuilder(); + builder.setCounter(counter); + builder.setRow(ByteString.copyFrom(row)); + return builder.build(); } @Override - public void write(DataOutput out) throws IOException { - Bytes.writeByteArray(out, row); - out.writeInt(counter); + public void initialize(IncCounterProcessorRequest msg) { + this.row = msg.getRow().toByteArray(); + this.counter = msg.getCounter(); } - } public static class FriendsOfFriendsProcessor extends - BaseRowProcessor> implements Writable { + BaseRowProcessor { byte[] row = null; byte[] person = null; final Set result = new HashSet(); @@ -366,8 +394,11 @@ public class TestRowProcessorEndpoint { } @Override - public Set getResult() { - return result; + public FriendsOfFriendsProcessorResponse getResult() { + FriendsOfFriendsProcessorResponse.Builder builder = + FriendsOfFriendsProcessorResponse.newBuilder(); + builder.addAllResult(result); + return builder.build(); } @Override @@ -405,29 +436,28 @@ public class TestRowProcessorEndpoint { } @Override - public void readFields(DataInput in) throws IOException { - this.person = Bytes.readByteArray(in); - this.row = Bytes.readByteArray(in); - int size = in.readInt(); - result.clear(); - for (int i = 0; i < size; ++i) { - result.add(Text.readString(in)); - } + public FriendsOfFriendsProcessorRequest getRequestData() throws IOException { + FriendsOfFriendsProcessorRequest.Builder builder = + FriendsOfFriendsProcessorRequest.newBuilder(); + builder.setPerson(ByteString.copyFrom(person)); + builder.setRow(ByteString.copyFrom(row)); + builder.addAllResult(result); + FriendsOfFriendsProcessorRequest f = builder.build(); + return f; } @Override - public void write(DataOutput out) throws IOException { - Bytes.writeByteArray(out, person); - Bytes.writeByteArray(out, row); - out.writeInt(result.size()); - for (String s : result) { - Text.writeString(out, s); - } + public void initialize(FriendsOfFriendsProcessorRequest request) + throws IOException { + this.person = request.getPerson().toByteArray(); + this.row = request.getRow().toByteArray(); + result.clear(); + 
result.addAll(request.getResultList()); } } public static class RowSwapProcessor extends - BaseRowProcessor> implements Writable { + BaseRowProcessor { byte[] row1 = new byte[0]; byte[] row2 = new byte[0]; @@ -455,6 +485,11 @@ public class TestRowProcessorEndpoint { return false; } + @Override + public RowSwapProcessorResponse getResult() { + return RowSwapProcessorResponse.getDefaultInstance(); + } + @Override public void process(long now, HRegion region, List mutations, WALEdit walEdit) throws IOException { @@ -501,26 +536,28 @@ public class TestRowProcessorEndpoint { } } - @Override - public void readFields(DataInput in) throws IOException { - this.row1 = Bytes.readByteArray(in); - this.row2 = Bytes.readByteArray(in); - } - - @Override - public void write(DataOutput out) throws IOException { - Bytes.writeByteArray(out, row1); - Bytes.writeByteArray(out, row2); - } - @Override public String getName() { return "swap"; } + + @Override + public RowSwapProcessorRequest getRequestData() throws IOException { + RowSwapProcessorRequest.Builder builder = RowSwapProcessorRequest.newBuilder(); + builder.setRow1(ByteString.copyFrom(row1)); + builder.setRow2(ByteString.copyFrom(row2)); + return builder.build(); + } + + @Override + public void initialize(RowSwapProcessorRequest msg) { + this.row1 = msg.getRow1().toByteArray(); + this.row2 = msg.getRow2().toByteArray(); + } } public static class TimeoutProcessor extends - BaseRowProcessor implements Writable { + BaseRowProcessor { byte[] row = new byte[0]; @@ -538,6 +575,11 @@ public class TestRowProcessorEndpoint { return Collections.singleton(row); } + @Override + public TimeoutProcessorResponse getResult() { + return TimeoutProcessorResponse.getDefaultInstance(); + } + @Override public void process(long now, HRegion region, List mutations, WALEdit walEdit) throws IOException { @@ -554,20 +596,22 @@ public class TestRowProcessorEndpoint { return true; } - @Override - public void readFields(DataInput in) throws IOException { - this.row = Bytes.readByteArray(in); - } - - @Override - public void write(DataOutput out) throws IOException { - Bytes.writeByteArray(out, row); - } - @Override public String getName() { return "timeout"; } + + @Override + public TimeoutProcessorRequest getRequestData() throws IOException { + TimeoutProcessorRequest.Builder builder = TimeoutProcessorRequest.newBuilder(); + builder.setRow(ByteString.copyFrom(row)); + return builder.build(); + } + + @Override + public void initialize(TimeoutProcessorRequest msg) throws IOException { + this.row = msg.getRow().toByteArray(); + } } public static void doScan( diff --git a/hbase-server/src/test/protobuf/IncrementCounterProcessor.proto b/hbase-server/src/test/protobuf/IncrementCounterProcessor.proto new file mode 100644 index 00000000000..b8c77caa628 --- /dev/null +++ b/hbase-server/src/test/protobuf/IncrementCounterProcessor.proto @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.hbase.coprocessor.protobuf.generated"; +option java_outer_classname = "IncrementCounterProcessorTestProtos"; +option java_generate_equals_and_hash = true; + +message IncCounterProcessorRequest { + required bytes row = 1; + required int32 counter = 2; +} + +message IncCounterProcessorResponse { + required int32 response = 1; +} + +message FriendsOfFriendsProcessorRequest { + required bytes person = 1; + required bytes row = 2; + repeated string result = 3; +} + +message FriendsOfFriendsProcessorResponse { + repeated string result = 1; +} + +message RowSwapProcessorRequest { + required bytes row1 = 1; + required bytes row2 = 2; +} + +message RowSwapProcessorResponse { +} + +message TimeoutProcessorRequest { + required bytes row = 1; +} + +message TimeoutProcessorResponse { +} \ No newline at end of file
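
For reviewers, a hedged sketch of how a client drives the new protobuf-based endpoint once this patch is applied. It mirrors the calls added to incrementCounter()/testDoubleScan() in TestRowProcessorEndpoint above; MyProcessor and MyProcessorResponse are placeholder names for a concrete RowProcessor implementation and its generated response message, not types introduced by this patch.

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.coprocessor.RowProcessorClient;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorRequest;
import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorResult;
import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService;

public class RowProcessorInvocationSketch {
  // Invokes the RowProcessorService endpoint on the region hosting 'row'
  // and unwraps the processor-specific response message.
  public static MyProcessorResponse invoke(HTable table, byte[] row,
      MyProcessor processor) throws Throwable {
    // RPC channel to the coprocessor endpoint for this row's region.
    CoprocessorRpcChannel channel = table.coprocessorService(row);
    // Blocking stub generated from RowProcessor.proto.
    RowProcessorService.BlockingInterface service =
        RowProcessorService.newBlockingStub(channel);
    // Wraps the processor class name and its initializer message into the request.
    RowProcessorRequest request = RowProcessorClient.getRowProcessorPB(processor);
    // The endpoint returns the processor's result as serialized bytes.
    RowProcessorResult result = service.process(null, request);
    return MyProcessorResponse.parseFrom(result.getRowProcessorResult());
  }
}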
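
On the processor side, the Writable readFields/write pair is gone: a RowProcessor now declares a request and a result protobuf message as its type parameters and implements getRequestData()/initialize() for the wire trip, plus a non-null getResult(). Below is a minimal sketch modelled on the test processors in this patch; MyRequest and MyResponse stand in for generated messages with a single bytes row field and are assumptions, not part of the patch.

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.BaseRowProcessor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

import com.google.protobuf.ByteString;

public class NoopRowProcessor extends BaseRowProcessor<MyRequest, MyResponse> {
  private byte[] row = new byte[0];

  public NoopRowProcessor() {}  // no-arg constructor: the endpoint instantiates by reflection
  public NoopRowProcessor(byte[] row) { this.row = row; }

  @Override
  public Collection<byte[]> getRowsToLock() {
    return Collections.singleton(row);
  }

  @Override
  public boolean readOnly() {
    return true;
  }

  @Override
  public void process(long now, HRegion region, List<KeyValue> mutations,
      WALEdit walEdit) throws IOException {
    // read-modify-write logic against the locked row would go here
  }

  @Override
  public MyResponse getResult() {
    // getResult() must now return a protobuf Message, even an empty one.
    return MyResponse.getDefaultInstance();
  }

  @Override
  public String getName() {
    return "noop";
  }

  @Override
  public MyRequest getRequestData() throws IOException {
    // Shipped to the server inside RowProcessorRequest.rowProcessorInitializerMessage.
    return MyRequest.newBuilder().setRow(ByteString.copyFrom(row)).build();
  }

  @Override
  public void initialize(MyRequest msg) throws IOException {
    // Restores the processor state on the server from the initializer message.
    this.row = msg.getRow().toByteArray();
  }
}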